Nov 25 09:44:10 crc systemd[1]: Starting Kubernetes Kubelet...
Nov 25 09:44:10 crc restorecon[4693]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 09:44:10 crc restorecon[4693]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 09:44:10 crc 
restorecon[4693]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 09:44:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 09:44:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 09:44:10 crc 
restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc 
restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc 
restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 09:44:10 
crc restorecon[4693]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 
09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:44:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 25 09:44:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 09:44:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:44:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:44:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to
system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 
09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 09:44:10 crc 
restorecon[4693]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:44:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Nov 25 09:44:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 25 09:44:10 crc restorecon[4693]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:44:10 crc restorecon[4693]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:44:11 crc restorecon[4693]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 
09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]:
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 
09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 09:44:11 crc restorecon[4693]: 
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 09:44:11 crc restorecon[4693]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 09:44:11 crc restorecon[4693]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0
Nov 25 09:44:11 crc kubenswrapper[4769]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 25 09:44:11 crc kubenswrapper[4769]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.
Nov 25 09:44:11 crc kubenswrapper[4769]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 25 09:44:11 crc kubenswrapper[4769]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 25 09:44:11 crc kubenswrapper[4769]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Nov 25 09:44:11 crc kubenswrapper[4769]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.896639 4769 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.904726 4769 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.904776 4769 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.904788 4769 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.904799 4769 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.904809 4769 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.904824 4769 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.904836 4769 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.904847 4769 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.904857 4769 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.904867 4769 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.904876 4769 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.904886 4769 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.904897 4769 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.904906 4769 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.904916 4769 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.904925 4769 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.904936 4769 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.904945 4769 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.904956 4769 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.904997 4769 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905017 4769 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905026 4769 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905036 4769 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905046 4769 feature_gate.go:330] unrecognized feature gate: Example
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905056 4769 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905067 4769 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905076 4769 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905087 4769 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905097 4769 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905107 4769 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905117 4769 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905127 4769 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905138 4769 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905147 4769 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905158 4769 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905167 4769 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905177 4769 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905187 4769 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905197 4769 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905208 4769 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905218 4769 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905227 4769 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905241 4769 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905255 4769 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905268 4769 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905282 4769 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905294 4769 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905304 4769 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905315 4769 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905325 4769 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905336 4769 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905347 4769 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905356 4769 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905366 4769 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905376 4769 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905386 4769 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905396 4769 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905406 4769 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905455 4769 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905468 4769 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905480 4769 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905491 4769 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905502 4769 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905518 4769 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905529 4769 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905540 4769 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905552 4769 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905566 4769 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905579 4769 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905591 4769 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.905605 4769 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.905812 4769 flags.go:64] FLAG: --address="0.0.0.0"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.905834 4769 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.905855 4769 flags.go:64] FLAG: --anonymous-auth="true"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.905870 4769 flags.go:64] FLAG: --application-metrics-count-limit="100"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.905884 4769 flags.go:64] FLAG: --authentication-token-webhook="false"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.905896 4769 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.905911 4769 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.905924 4769 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.905935 4769 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.905944 4769 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.905953 4769 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906000 4769 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906012 4769 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906023 4769 flags.go:64] FLAG: --cgroup-root=""
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906034 4769 flags.go:64] FLAG: --cgroups-per-qos="true"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906045 4769 flags.go:64] FLAG: --client-ca-file=""
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906056 4769 flags.go:64] FLAG: --cloud-config=""
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906068 4769 flags.go:64] FLAG: --cloud-provider=""
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906078 4769 flags.go:64] FLAG: --cluster-dns="[]"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906092 4769 flags.go:64] FLAG: --cluster-domain=""
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906103 4769 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906114 4769 flags.go:64] FLAG: --config-dir=""
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906125 4769 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906137 4769 flags.go:64] FLAG: --container-log-max-files="5"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906151 4769 flags.go:64] FLAG: --container-log-max-size="10Mi"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906161 4769 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906170 4769 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906180 4769 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906189 4769 flags.go:64] FLAG: --contention-profiling="false"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906198 4769 flags.go:64] FLAG: --cpu-cfs-quota="true"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906209 4769 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906219 4769 flags.go:64] FLAG: --cpu-manager-policy="none"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906228 4769 flags.go:64] FLAG: --cpu-manager-policy-options=""
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906239 4769 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906249 4769 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906258 4769 flags.go:64] FLAG: --enable-debugging-handlers="true"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906268 4769 flags.go:64] FLAG: --enable-load-reader="false"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906277 4769 flags.go:64] FLAG: --enable-server="true"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906286 4769 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906298 4769 flags.go:64] FLAG: --event-burst="100"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906307 4769 flags.go:64] FLAG: --event-qps="50"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906316 4769 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906325 4769 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906335 4769 flags.go:64] FLAG: --eviction-hard=""
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906346 4769 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906355 4769 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906364 4769 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906373 4769 flags.go:64] FLAG: --eviction-soft=""
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906383 4769 flags.go:64] FLAG: --eviction-soft-grace-period=""
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906392 4769 flags.go:64] FLAG: --exit-on-lock-contention="false"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906401 4769 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906410 4769 flags.go:64] FLAG: --experimental-mounter-path=""
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906419 4769 flags.go:64] FLAG: --fail-cgroupv1="false"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906428 4769 flags.go:64] FLAG: --fail-swap-on="true"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906437 4769 flags.go:64] FLAG: --feature-gates=""
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906447 4769 flags.go:64] FLAG: --file-check-frequency="20s"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906457 4769 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906466 4769 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906475 4769 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906485 4769 flags.go:64] FLAG: --healthz-port="10248"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906494 4769 flags.go:64] FLAG: --help="false"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906504 4769 flags.go:64] FLAG: --hostname-override=""
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906513 4769 flags.go:64] FLAG: --housekeeping-interval="10s"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906538 4769 flags.go:64] FLAG: --http-check-frequency="20s"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906575 4769 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906584 4769 flags.go:64] FLAG: --image-credential-provider-config=""
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906593 4769 flags.go:64] FLAG: --image-gc-high-threshold="85"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906602 4769 flags.go:64] FLAG: --image-gc-low-threshold="80"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906611 4769 flags.go:64] FLAG: --image-service-endpoint=""
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906620 4769 flags.go:64] FLAG: --kernel-memcg-notification="false"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906628 4769 flags.go:64] FLAG: --kube-api-burst="100"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906637 4769 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906647 4769 flags.go:64] FLAG: --kube-api-qps="50"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906655 4769 flags.go:64] FLAG: --kube-reserved=""
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906664 4769 flags.go:64] FLAG: --kube-reserved-cgroup=""
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906673 4769 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906683 4769 flags.go:64] FLAG: --kubelet-cgroups=""
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906692 4769 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906701 4769 flags.go:64] FLAG: --lock-file=""
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906711 4769 flags.go:64] FLAG: --log-cadvisor-usage="false"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906721 4769 flags.go:64] FLAG: --log-flush-frequency="5s"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906730 4769 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906744 4769 flags.go:64] FLAG: --log-json-split-stream="false"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906753 4769 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906761 4769 flags.go:64] FLAG: --log-text-split-stream="false"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906770 4769 flags.go:64] FLAG: --logging-format="text"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906779 4769 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906789 4769 flags.go:64] FLAG: --make-iptables-util-chains="true"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906798 4769 flags.go:64] FLAG: --manifest-url=""
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906807 4769 flags.go:64] FLAG: --manifest-url-header=""
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906818 4769 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906827 4769 flags.go:64] FLAG: --max-open-files="1000000"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906839 4769 flags.go:64] FLAG: --max-pods="110"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906848 4769 flags.go:64] FLAG: --maximum-dead-containers="-1"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906858 4769 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906867 4769 flags.go:64] FLAG: --memory-manager-policy="None"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906876 4769 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906885 4769 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906895 4769 flags.go:64] FLAG: --node-ip="192.168.126.11"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906904 4769 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906924 4769 flags.go:64] FLAG: --node-status-max-images="50"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906933 4769 flags.go:64] FLAG: --node-status-update-frequency="10s"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906942 4769 flags.go:64] FLAG: --oom-score-adj="-999"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906952 4769 flags.go:64] FLAG: --pod-cidr=""
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.906987 4769 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907001 4769 flags.go:64] FLAG: --pod-manifest-path=""
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907011 4769 flags.go:64] FLAG: --pod-max-pids="-1"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907020 4769 flags.go:64] FLAG: --pods-per-core="0"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907029 4769 flags.go:64] FLAG: --port="10250"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907038 4769 flags.go:64] FLAG: --protect-kernel-defaults="false"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907046 4769 flags.go:64] FLAG: --provider-id=""
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907055 4769 flags.go:64] FLAG: --qos-reserved=""
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907064 4769 flags.go:64] FLAG: --read-only-port="10255"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907073 4769 flags.go:64] FLAG: --register-node="true"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907082 4769 flags.go:64] FLAG: --register-schedulable="true"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907093 4769 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907108 4769 flags.go:64] FLAG: --registry-burst="10"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907117 4769 flags.go:64] FLAG: --registry-qps="5"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907127 4769 flags.go:64] FLAG: --reserved-cpus=""
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907135 4769 flags.go:64] FLAG: --reserved-memory=""
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907153 4769 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907162 4769 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907172 4769 flags.go:64] FLAG: --rotate-certificates="false"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907181 4769 flags.go:64] FLAG: --rotate-server-certificates="false"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907190 4769 flags.go:64] FLAG: --runonce="false"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907199 4769 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907208 4769 flags.go:64] FLAG: --runtime-request-timeout="2m0s"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907217 4769 flags.go:64] FLAG: --seccomp-default="false"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907226 4769 flags.go:64] FLAG: --serialize-image-pulls="true"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907235 4769 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907244 4769 flags.go:64] FLAG: --storage-driver-db="cadvisor"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907253 4769 flags.go:64] FLAG: --storage-driver-host="localhost:8086"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907263 4769 flags.go:64] FLAG: --storage-driver-password="root"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907272 4769 flags.go:64] FLAG: --storage-driver-secure="false"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907281 4769 flags.go:64] FLAG: --storage-driver-table="stats"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907290 4769 flags.go:64] FLAG: --storage-driver-user="root"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907299 4769 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907308 4769 flags.go:64] FLAG: --sync-frequency="1m0s"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907317 4769 flags.go:64] FLAG: --system-cgroups=""
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907326 4769 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907340 4769 flags.go:64] FLAG: --system-reserved-cgroup=""
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907349 4769 flags.go:64] FLAG: --tls-cert-file=""
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907357 4769 flags.go:64] FLAG: --tls-cipher-suites="[]"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907368 4769 flags.go:64] FLAG: --tls-min-version=""
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907377 4769 flags.go:64] FLAG: --tls-private-key-file=""
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907386 4769 flags.go:64] FLAG: --topology-manager-policy="none"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907394 4769 flags.go:64] FLAG: --topology-manager-policy-options=""
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907403 4769 flags.go:64] FLAG: --topology-manager-scope="container"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907412 4769 flags.go:64] FLAG: --v="2"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907423 4769 flags.go:64] FLAG: --version="false"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907434 4769 flags.go:64] FLAG: --vmodule=""
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907446 4769 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.907455 4769 flags.go:64] FLAG: --volume-stats-agg-period="1m0s"
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.907693 4769 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.907704 4769 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.907713 4769 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.907722 4769 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.907733 4769 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.907742 4769 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.907751 4769 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.907759 4769 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.907768 4769 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.907776 4769 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.907784 4769 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.907792 4769 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.907801 4769 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.907809 4769 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.907817 4769 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.907827 4769 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.907837 4769 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.907848 4769 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.907858 4769 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.907866 4769 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.907875 4769 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.907883 4769 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.907891 4769 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.907899 4769 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.907907 4769 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.907915 4769 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.907922 4769 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.907930 4769 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.907938 4769 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.907945 4769 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.907953 4769 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.907984 4769 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.907992 4769 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.908000 4769 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.908009 4769 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.908017 4769 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.908025 4769 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.908035 4769 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.908046 4769 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.908055 4769 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.908064 4769 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.908072 4769 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.908081 4769 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.908091 4769 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.908099 4769 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.908107 4769 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.908114 4769 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.908122 4769 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.908130 4769 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.908138 4769 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.908145 4769 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.908153 4769 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.908161 4769 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.908171 4769 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.908181 4769 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.908190 4769 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.908200 4769 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.908209 4769 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.908218 4769 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.908226 4769 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.908233 4769 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.908241 4769 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.908249 4769 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.908256 4769 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.908264 4769 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.908272 4769 feature_gate.go:330] unrecognized feature gate: Example
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.908279 4769 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.908287 4769 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.908299 4769 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.908308 4769 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.908316 4769 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.909284 4769 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.919335 4769 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.919363 4769 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919419 4769 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919427 4769 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919431 4769 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919434 4769 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919438 4769 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919442 4769 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919445 4769 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919449 4769 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919453 4769 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919456 4769 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919460 4769 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919464 4769 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919471 4769 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919475 4769 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919479 4769 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919483 4769 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919486 4769 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919490 4769 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919493 4769 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919498 4769 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919502 4769 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919506 4769 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919510 4769 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919513 4769 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919517 4769 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919521 4769 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919524 4769 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919528 4769 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919532 4769 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919536 4769 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919540 4769 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919544 4769 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919548 4769 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919552 4769 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919556 4769 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919560 4769 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919564 4769 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919567 4769 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919571 4769 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919574 4769 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919577 4769 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919581 4769 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919584 4769 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919588 4769 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919591 4769 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919595 4769 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919598 4769 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919602 4769 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919605 4769 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919608 4769 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919612 4769 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919616 4769 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919620 4769 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919625 4769 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919629 4769 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919632 4769 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919636 4769 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919640 4769 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919643 4769 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919647 4769 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919651 4769 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919654 4769 feature_gate.go:330] unrecognized feature gate: Example
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919657 4769 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919661 4769 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919665 4769 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919668 4769 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919672 4769 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919675 4769 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919679 4769 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919682 4769 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919686 4769 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.919692 4769 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919794 4769 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919801 4769 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919805 4769 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919809 4769 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919813 4769 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919817 4769 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919820 4769 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919824 4769 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919828 4769 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919834 4769 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919839 4769 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919843 4769 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919847 4769 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919852 4769 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919856 4769 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919859 4769 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919863 4769 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919866 4769 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919870 4769 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919873 4769 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919877 4769 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919880 4769 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919884 4769 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919887 4769 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919890 4769 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919894 4769 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919897 4769 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919901 4769 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919904 4769 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919908 4769 feature_gate.go:330] unrecognized feature gate: Example
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919911 4769 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919915 4769 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919918 4769 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919922 4769 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919926 4769 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919929 4769 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919933 4769 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919936 4769 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919940 4769 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919943 4769 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919947 4769 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919952 4769 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919956 4769 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919975 4769 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919979 4769 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919983 4769 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919987 4769 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919992 4769 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.919997 4769 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.920001 4769 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.920004 4769 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.920008 4769 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.920012 4769 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.920016 4769 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.920020 4769 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.920024 4769 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.920027 4769 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.920031 4769 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.920035 4769 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.920039 4769 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.920042 4769 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.920046 4769 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.920049 4769 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.920053 4769 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.920057 4769 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.920060 4769 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.920063 4769 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.920067 4769 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.920070 4769 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.920074 4769 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 25 09:44:11 crc kubenswrapper[4769]: W1125 09:44:11.920078 4769 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.920084 4769 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.920973 4769 server.go:940] "Client rotation is on, will bootstrap in background"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.924475 4769 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.924561 4769 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.926591 4769 server.go:997] "Starting client certificate rotation"
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.926611 4769 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.926786 4769 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2026-01-03 08:57:31.938369397 +0000 UTC
Nov 25 09:44:11 crc kubenswrapper[4769]: I1125 09:44:11.926869 4769 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 935h13m20.011502413s for next certificate rotation
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.021125 4769 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.030296 4769 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.053036 4769 log.go:25] "Validated CRI v1 runtime API"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.127342 4769 log.go:25] "Validated CRI v1 image API"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.131308 4769 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.137924 4769 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-11-25-09-38-30-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.138007 4769 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}]
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.154148 4769 manager.go:217] Machine: {Timestamp:2025-11-25 09:44:12.152285888 +0000 UTC m=+0.737258211 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2800000 MemoryCapacity:33654132736 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:fb8d0e0c-b27f-49f2-81a7-fe75c397959e BootID:2e5db0ee-b4dd-41e7-b399-6918209aec97 Filesystems:[{Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730829824 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827068416 Type:vfs Inodes:1048576 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:4a:85:7f Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:4a:85:7f Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:e1:e4:a8 Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:ad:71:8b Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:b8:bf:89 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:a2:41:35 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:36:c4:ee:3a:e4:be Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:fa:77:18:dc:89:59 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654132736 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.154481 4769 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.154712 4769 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.155141 4769 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.155365 4769 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.155418 4769 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.156326 4769 topology_manager.go:138] "Creating topology manager with none policy"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.156354 4769 container_manager_linux.go:303] "Creating device plugin manager"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.157111 4769 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.157141 4769 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.157995 4769 state_mem.go:36] "Initialized new in-memory state store"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.158105 4769 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.161513 4769 kubelet.go:418] "Attempting to sync node with API server"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.161540 4769 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.161593 4769 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.161621 4769 kubelet.go:324] "Adding apiserver pod source"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.161906 4769 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.168203 4769 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.169337 4769 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Nov 25 09:44:12 crc kubenswrapper[4769]: W1125 09:44:12.171252 4769 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.201:6443: connect: connection refused
Nov 25 09:44:12 crc kubenswrapper[4769]: E1125 09:44:12.171470 4769 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.201:6443: connect: connection refused" logger="UnhandledError"
Nov 25 09:44:12 crc kubenswrapper[4769]: W1125 09:44:12.171371 4769 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.201:6443: connect: connection refused
Nov 25 09:44:12 crc kubenswrapper[4769]: E1125 09:44:12.172220 4769 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.201:6443: connect: connection refused" logger="UnhandledError"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.173801 4769 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.175137 4769 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.175173 4769 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.175184 4769 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.175193 4769 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.175206 4769 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.175215 4769 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.175225 4769 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.175240 4769 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.175250 4769 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.175259 4769 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.175274 4769 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.175283 4769 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.176180 4769 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.176812 4769 server.go:1280] "Started kubelet"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.177058 4769 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.201:6443: connect: connection refused
Nov 25 09:44:12 crc systemd[1]: Started Kubernetes Kubelet.
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.179300 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.179337 4769 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.179426 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-27 03:04:47.518407014 +0000 UTC
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.179541 4769 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 41h20m35.338874483s for next certificate rotation
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.179550 4769 volume_manager.go:287] "The desired_state_of_world populator starts"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.179582 4769 volume_manager.go:289] "Starting Kubelet Volume Manager"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.179699 4769 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Nov 25 09:44:12 crc kubenswrapper[4769]: E1125 09:44:12.179521 4769 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Nov 25 09:44:12 crc kubenswrapper[4769]: E1125 09:44:12.180052 4769 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.201:6443: connect: connection refused" interval="200ms"
Nov 25 09:44:12 crc kubenswrapper[4769]: W1125 09:44:12.180107 4769 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.201:6443: connect: connection refused
Nov 25 09:44:12 crc kubenswrapper[4769]: E1125 09:44:12.180174 4769 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.201:6443: connect: connection refused" logger="UnhandledError"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.180737 4769 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.180756 4769 factory.go:55] Registering systemd factory
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.180766 4769 factory.go:221] Registration of the systemd container factory successfully
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.180808 4769 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.180875 4769 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.181490 4769 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.183373 4769 server.go:460] "Adding debug handlers to kubelet server"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.183903 4769 factory.go:153] Registering CRI-O factory
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.183938 4769 factory.go:221] Registration of the crio container factory successfully
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.183991 4769 factory.go:103] Registering Raw factory
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.184012 4769 manager.go:1196] Started watching for new ooms in manager
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.184594 4769 manager.go:319] Starting recovery of all containers
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.191541 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.191644 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.191672 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.191705 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.191726 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.191754 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.191775 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.191798 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.191833 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.191863 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: E1125 09:44:12.186233 4769 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.201:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187b36ba261aac41 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 09:44:12.176780353 +0000 UTC m=+0.761752666,LastTimestamp:2025-11-25 09:44:12.176780353 +0000 UTC m=+0.761752666,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.193955 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.194026 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.194045 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.194077 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.194091 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.194115 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.194131 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.194152 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.194166 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.194183 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.194205 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.194220 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.194240 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.194257 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.194273 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.194295 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.194316 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.194338 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.194361 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.194376 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.194390 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.194412 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.194430 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.194454 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.194475 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.194492 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.194512 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.194530 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.194555 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.194571 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.194588 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.197204 4769 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.197303 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.197326 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.197345 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.197411 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.197504 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.197551 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.197640 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.197701 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.197732 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.197786 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.197817 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.197872 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.197916 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.198024 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.198083 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.198128 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.198172 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.198210 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.198253 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.198284 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.198311 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.198434 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.198471 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.198516 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.198557 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.198587 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.198639 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.198689 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.198748 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.198797 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.198838 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.198907 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.198950 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.199034 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.199068 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.199137 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.199207 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.199248 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.199290 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.199331 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.199361 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.200398 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.200466 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.200487 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.200505 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.200528 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.200549 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.200565 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.200577 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.200589 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.200600 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.200612 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.200623 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.200633 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.200694 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.200706 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.200717 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.200733 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.200744 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.200755 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.200766 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.200776 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.200786 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.200806 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.200822 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.200835 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.200847 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.200858 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.200869 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.200882 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.200892 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.200904 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.200915 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.200925 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.200935 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.200946 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.200956 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.200978 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.200987 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.200996 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201017 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201027 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201037 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201048 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201059 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Nov 25 
09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201070 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201110 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201121 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201132 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201143 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201154 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201164 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201174 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201184 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201195 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201237 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Nov 25 09:44:12 crc 
kubenswrapper[4769]: I1125 09:44:12.201276 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201290 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201303 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201317 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201333 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201348 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201363 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201378 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201392 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201404 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201417 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Nov 25 09:44:12 crc 
kubenswrapper[4769]: I1125 09:44:12.201430 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201443 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201457 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201472 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201486 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201500 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201512 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201524 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201536 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201554 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201567 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 
09:44:12.201581 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201595 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201608 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201622 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201637 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201652 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201667 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201683 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201697 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201711 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201725 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Nov 25 09:44:12 crc 
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201755 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201770 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201789 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201804 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201818 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201831 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201846 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201860 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201879 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201895 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201910 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201924 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201938 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201955 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.201989 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.202004 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.202020 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.202036 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.202053 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.202066 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.202080 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.202095 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.202110 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.202125 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.202140 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.202155 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.202169 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.202184 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.202197 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.202211 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.202226 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.202239 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.202253 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.202268 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.202285 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.202299 4769 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext=""
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.202311 4769 reconstruct.go:97] "Volume reconstruction finished"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.202322 4769 reconciler.go:26] "Reconciler: start to sync state"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.217056 4769 manager.go:324] Recovery completed
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.230907 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.233243 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.233289 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.233301 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.233283 4769 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.234134 4769 cpu_manager.go:225] "Starting CPU manager" policy="none"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.234181 4769 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.234220 4769 state_mem.go:36] "Initialized new in-memory state store"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.235526 4769 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv6"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.235576 4769 status_manager.go:217] "Starting to sync pod status with apiserver"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.235598 4769 kubelet.go:2335] "Starting kubelet main sync loop"
Nov 25 09:44:12 crc kubenswrapper[4769]: E1125 09:44:12.235749 4769 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]"
Nov 25 09:44:12 crc kubenswrapper[4769]: W1125 09:44:12.236121 4769 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.201:6443: connect: connection refused
Nov 25 09:44:12 crc kubenswrapper[4769]: E1125 09:44:12.236177 4769 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.201:6443: connect: connection refused" logger="UnhandledError"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.260280 4769 policy_none.go:49] "None policy: Start"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.261528 4769 memory_manager.go:170] "Starting memorymanager" policy="None"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.261609 4769 state_mem.go:35] "Initializing new in-memory state store"
Nov 25 09:44:12 crc kubenswrapper[4769]: E1125 09:44:12.280037 4769 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.303983 4769 manager.go:334] "Starting Device Plugin manager"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.304241 4769 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.304264 4769 server.go:79] "Starting device plugin registration server"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.304669 4769 eviction_manager.go:189] "Eviction manager: starting control loop"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.304699 4769 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.304931 4769 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.305129 4769 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.305229 4769 plugin_manager.go:118] "Starting Kubelet Plugin Manager"
Nov 25 09:44:12 crc kubenswrapper[4769]: E1125 09:44:12.313704 4769 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.335925 4769 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc"]
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.336054 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.337777 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.337877 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.337897 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.338163 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.338281 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.338312 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.339444 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.339466 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.339474 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.339825 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.339865 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.339886 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.340088 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.340211 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.340234 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.341095 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.341140 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.341150 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.341226 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.341415 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.341488 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.341666 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.341713 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.341734 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.342565 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.342599 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.342607 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.342758 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.342767 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.342775 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.342870 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.343118 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.343186 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.344492 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.344530 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.344559 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.344572 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.344534 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.344616 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.344805 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.344831 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.347001 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.347027 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.347037 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:12 crc kubenswrapper[4769]: E1125 09:44:12.380579 4769 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.201:6443: connect: connection refused" interval="400ms"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.404188 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.404238 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.404260 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.404279 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.404299 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.404315 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.404330 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
\"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.404345 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.404369 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.404388 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.404405 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.404422 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.404440 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.404457 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.404490 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.404849 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:44:12 crc 
kubenswrapper[4769]: I1125 09:44:12.406154 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.406189 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.406205 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.406232 4769 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 09:44:12 crc kubenswrapper[4769]: E1125 09:44:12.406554 4769 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.201:6443: connect: connection refused" node="crc" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.506021 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.506073 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.506092 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.506108 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.506122 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.506135 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.506149 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.506163 4769 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.506176 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.506190 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.506206 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.506218 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.506232 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.506245 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.506252 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.506277 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.506312 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.506374 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.506271 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.506397 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.506405 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.506294 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.506435 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.506450 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.506612 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.506638 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.506620 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.506676 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.506673 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.607057 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.608682 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.608752 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.608763 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.608798 4769 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 25 09:44:12 crc kubenswrapper[4769]: E1125 09:44:12.609483 4769 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.201:6443: connect: connection refused" node="crc"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.667134 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.673524 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.696262 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.704556 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 09:44:12 crc kubenswrapper[4769]: I1125 09:44:12.708709 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 25 09:44:12 crc kubenswrapper[4769]: W1125 09:44:12.718866 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-24a38e173c15fadc1a3add18588827f737e3067c9f4e2ceb2acd0af5aa9d02db WatchSource:0}: Error finding container 24a38e173c15fadc1a3add18588827f737e3067c9f4e2ceb2acd0af5aa9d02db: Status 404 returned error can't find the container with id 24a38e173c15fadc1a3add18588827f737e3067c9f4e2ceb2acd0af5aa9d02db
Nov 25 09:44:12 crc kubenswrapper[4769]: W1125 09:44:12.722382 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-518573058e318810e8ed6b48041d619be3c92549db8cc92e18384011044ace21 WatchSource:0}: Error finding container 518573058e318810e8ed6b48041d619be3c92549db8cc92e18384011044ace21: Status 404 returned error can't find the container with id 518573058e318810e8ed6b48041d619be3c92549db8cc92e18384011044ace21
Nov 25 09:44:12 crc kubenswrapper[4769]: W1125 09:44:12.735459 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-f55a382e9f6ac4fa8dc32e5c7d5abfecfef099fe03938c1d282f2a188f5c57b8 WatchSource:0}: Error finding container f55a382e9f6ac4fa8dc32e5c7d5abfecfef099fe03938c1d282f2a188f5c57b8: Status 404 returned error can't find the container with id f55a382e9f6ac4fa8dc32e5c7d5abfecfef099fe03938c1d282f2a188f5c57b8
Nov 25 09:44:12 crc kubenswrapper[4769]: W1125 09:44:12.739182 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-a8cf8089b8c6321037337dfc0579d7f0be024e6c218f514ae11e0903654b65e0 WatchSource:0}: Error finding container a8cf8089b8c6321037337dfc0579d7f0be024e6c218f514ae11e0903654b65e0: Status 404 returned error can't find the container with id a8cf8089b8c6321037337dfc0579d7f0be024e6c218f514ae11e0903654b65e0
Nov 25 09:44:12 crc kubenswrapper[4769]: E1125 09:44:12.781523 4769 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.201:6443: connect: connection refused" interval="800ms"
Nov 25 09:44:13 crc kubenswrapper[4769]: I1125 09:44:13.010607 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:44:13 crc kubenswrapper[4769]: I1125 09:44:13.012579 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:13 crc kubenswrapper[4769]: I1125 09:44:13.012629 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:13 crc kubenswrapper[4769]: I1125 09:44:13.012640 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:13 crc kubenswrapper[4769]: I1125 09:44:13.012672 4769 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 25 09:44:13 crc kubenswrapper[4769]: E1125 09:44:13.013285 4769 kubelet_node_status.go:99] "Unable to register node with API server" err="Post 
\"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.201:6443: connect: connection refused" node="crc" Nov 25 09:44:13 crc kubenswrapper[4769]: W1125 09:44:13.017701 4769 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.201:6443: connect: connection refused Nov 25 09:44:13 crc kubenswrapper[4769]: E1125 09:44:13.017796 4769 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.201:6443: connect: connection refused" logger="UnhandledError" Nov 25 09:44:13 crc kubenswrapper[4769]: W1125 09:44:13.111022 4769 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.201:6443: connect: connection refused Nov 25 09:44:13 crc kubenswrapper[4769]: E1125 09:44:13.111126 4769 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.201:6443: connect: connection refused" logger="UnhandledError" Nov 25 09:44:13 crc kubenswrapper[4769]: I1125 09:44:13.178115 4769 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.201:6443: connect: connection refused Nov 25 09:44:13 crc kubenswrapper[4769]: I1125 09:44:13.241851 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"24a38e173c15fadc1a3add18588827f737e3067c9f4e2ceb2acd0af5aa9d02db"} Nov 25 09:44:13 crc kubenswrapper[4769]: I1125 09:44:13.243317 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"f55a382e9f6ac4fa8dc32e5c7d5abfecfef099fe03938c1d282f2a188f5c57b8"} Nov 25 09:44:13 crc kubenswrapper[4769]: I1125 09:44:13.244616 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"a8cf8089b8c6321037337dfc0579d7f0be024e6c218f514ae11e0903654b65e0"} Nov 25 09:44:13 crc kubenswrapper[4769]: I1125 09:44:13.246070 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"652c67a42202e4e8095a904e648ef22ef9692d7b38a06b30c2d5d96d826301d9"} Nov 25 09:44:13 crc kubenswrapper[4769]: I1125 09:44:13.247200 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"518573058e318810e8ed6b48041d619be3c92549db8cc92e18384011044ace21"} Nov 25 09:44:13 crc kubenswrapper[4769]: W1125 
Nov 25 09:44:13 crc kubenswrapper[4769]: W1125 09:44:13.289572 4769 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.201:6443: connect: connection refused
Nov 25 09:44:13 crc kubenswrapper[4769]: E1125 09:44:13.289994 4769 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.201:6443: connect: connection refused" logger="UnhandledError"
Nov 25 09:44:13 crc kubenswrapper[4769]: W1125 09:44:13.342040 4769 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.201:6443: connect: connection refused
Nov 25 09:44:13 crc kubenswrapper[4769]: E1125 09:44:13.342158 4769 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.201:6443: connect: connection refused" logger="UnhandledError"
Nov 25 09:44:13 crc kubenswrapper[4769]: E1125 09:44:13.583068 4769 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.201:6443: connect: connection refused" interval="1.6s"
Nov 25 09:44:13 crc kubenswrapper[4769]: I1125 09:44:13.813815 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:44:13 crc kubenswrapper[4769]: I1125 09:44:13.815911 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:13 crc kubenswrapper[4769]: I1125 09:44:13.815982 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:13 crc kubenswrapper[4769]: I1125 09:44:13.815999 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:13 crc kubenswrapper[4769]: I1125 09:44:13.816032 4769 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 25 09:44:13 crc kubenswrapper[4769]: E1125 09:44:13.816653 4769 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.201:6443: connect: connection refused" node="crc"
Nov 25 09:44:14 crc kubenswrapper[4769]: I1125 09:44:14.178671 4769 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.201:6443: connect: connection refused
Nov 25 09:44:14 crc kubenswrapper[4769]: I1125 09:44:14.252695 4769 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9" exitCode=0
event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9"} Nov 25 09:44:14 crc kubenswrapper[4769]: I1125 09:44:14.252864 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:44:14 crc kubenswrapper[4769]: I1125 09:44:14.254088 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:14 crc kubenswrapper[4769]: I1125 09:44:14.254140 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:14 crc kubenswrapper[4769]: I1125 09:44:14.254155 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:14 crc kubenswrapper[4769]: I1125 09:44:14.255876 4769 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="7def6cd379c7f5d75dc1bf2c67a6a758ac8565e366f0f907903566232b7746a5" exitCode=0 Nov 25 09:44:14 crc kubenswrapper[4769]: I1125 09:44:14.255983 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"7def6cd379c7f5d75dc1bf2c67a6a758ac8565e366f0f907903566232b7746a5"} Nov 25 09:44:14 crc kubenswrapper[4769]: I1125 09:44:14.256032 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:44:14 crc kubenswrapper[4769]: I1125 09:44:14.256291 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:44:14 crc kubenswrapper[4769]: I1125 09:44:14.257184 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:14 crc kubenswrapper[4769]: I1125 09:44:14.257207 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:14 crc kubenswrapper[4769]: I1125 09:44:14.257210 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:14 crc kubenswrapper[4769]: I1125 09:44:14.257241 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:14 crc kubenswrapper[4769]: I1125 09:44:14.257254 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:14 crc kubenswrapper[4769]: I1125 09:44:14.257219 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:14 crc kubenswrapper[4769]: I1125 09:44:14.258420 4769 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="5ac282b65751506b0de6685f1e74ad6468022a459e40c124923fc1116c4b6c72" exitCode=0 Nov 25 09:44:14 crc kubenswrapper[4769]: I1125 09:44:14.258490 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"5ac282b65751506b0de6685f1e74ad6468022a459e40c124923fc1116c4b6c72"} Nov 25 09:44:14 crc kubenswrapper[4769]: I1125 09:44:14.258575 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:44:14 crc kubenswrapper[4769]: I1125 09:44:14.259808 4769 
Nov 25 09:44:14 crc kubenswrapper[4769]: I1125 09:44:14.259808 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:14 crc kubenswrapper[4769]: I1125 09:44:14.259856 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:14 crc kubenswrapper[4769]: I1125 09:44:14.259875 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:14 crc kubenswrapper[4769]: I1125 09:44:14.261826 4769 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="676ced28573a95ae4d739e490ebdfa690170c0943a22cb1cbafdc2aeb84df172" exitCode=0
Nov 25 09:44:14 crc kubenswrapper[4769]: I1125 09:44:14.261922 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"676ced28573a95ae4d739e490ebdfa690170c0943a22cb1cbafdc2aeb84df172"}
Nov 25 09:44:14 crc kubenswrapper[4769]: I1125 09:44:14.262073 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:44:14 crc kubenswrapper[4769]: I1125 09:44:14.263362 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:14 crc kubenswrapper[4769]: I1125 09:44:14.263407 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:14 crc kubenswrapper[4769]: I1125 09:44:14.263428 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:14 crc kubenswrapper[4769]: I1125 09:44:14.265654 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"0fad541e8dbab3802d7acafd3a70a54f992d4578319dc441c41ffdffdd635d63"}
Nov 25 09:44:14 crc kubenswrapper[4769]: I1125 09:44:14.265709 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"8b504d3d81cc7b0e4741363b8a9e4c0ea3a0c1af959c8b3def222b44a646b22b"}
Nov 25 09:44:14 crc kubenswrapper[4769]: I1125 09:44:14.265731 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"58cd58c1df5cc30ce98fc70b6117deafa5ebeb2d1a7d7b481b62c38ab39c9290"}
Nov 25 09:44:14 crc kubenswrapper[4769]: I1125 09:44:14.265750 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde"}
Nov 25 09:44:14 crc kubenswrapper[4769]: I1125 09:44:14.265757 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:44:14 crc kubenswrapper[4769]: I1125 09:44:14.266768 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:14 crc kubenswrapper[4769]: I1125 09:44:14.266825 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov
25 09:44:14 crc kubenswrapper[4769]: I1125 09:44:14.266841 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:15 crc kubenswrapper[4769]: W1125 09:44:15.038283 4769 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.201:6443: connect: connection refused Nov 25 09:44:15 crc kubenswrapper[4769]: E1125 09:44:15.038427 4769 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.201:6443: connect: connection refused" logger="UnhandledError" Nov 25 09:44:15 crc kubenswrapper[4769]: W1125 09:44:15.174804 4769 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.201:6443: connect: connection refused Nov 25 09:44:15 crc kubenswrapper[4769]: E1125 09:44:15.174879 4769 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.201:6443: connect: connection refused" logger="UnhandledError" Nov 25 09:44:15 crc kubenswrapper[4769]: I1125 09:44:15.178914 4769 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.201:6443: connect: connection refused Nov 25 09:44:15 crc kubenswrapper[4769]: E1125 09:44:15.183924 4769 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.201:6443: connect: connection refused" interval="3.2s" Nov 25 09:44:15 crc kubenswrapper[4769]: I1125 09:44:15.270530 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"4f2bc66817eb90eafa6eae43756cc4389973a5d3939e464e42a4348515e430d0"} Nov 25 09:44:15 crc kubenswrapper[4769]: I1125 09:44:15.270608 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:44:15 crc kubenswrapper[4769]: I1125 09:44:15.271609 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:15 crc kubenswrapper[4769]: I1125 09:44:15.271641 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:15 crc kubenswrapper[4769]: I1125 09:44:15.271653 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:15 crc kubenswrapper[4769]: I1125 09:44:15.273814 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" 
event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"10cb7e7ffd92e069437d5588d6e1f2bcb3d776fe24abf845fa3554e8ef8ebacb"} Nov 25 09:44:15 crc kubenswrapper[4769]: I1125 09:44:15.273843 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"1defc8279ef3e3bcb7b94e0ebecd9adfa114c27b4aba6b07fc9cce9933ac3eed"} Nov 25 09:44:15 crc kubenswrapper[4769]: I1125 09:44:15.273853 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"ea75adfbd1af3891c8c83253f0cc56e3b7f0eef396bd625d683d955d9732eba6"} Nov 25 09:44:15 crc kubenswrapper[4769]: I1125 09:44:15.273873 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:44:15 crc kubenswrapper[4769]: I1125 09:44:15.275064 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:15 crc kubenswrapper[4769]: I1125 09:44:15.275114 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:15 crc kubenswrapper[4769]: I1125 09:44:15.275129 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:15 crc kubenswrapper[4769]: I1125 09:44:15.277943 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291"} Nov 25 09:44:15 crc kubenswrapper[4769]: I1125 09:44:15.278074 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24"} Nov 25 09:44:15 crc kubenswrapper[4769]: I1125 09:44:15.278097 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a"} Nov 25 09:44:15 crc kubenswrapper[4769]: I1125 09:44:15.278117 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96"} Nov 25 09:44:15 crc kubenswrapper[4769]: I1125 09:44:15.281658 4769 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="72a577eb9b7ce279cb1d93202a80689e2e517349b6d3d88d83b71f8304bd7dd5" exitCode=0 Nov 25 09:44:15 crc kubenswrapper[4769]: I1125 09:44:15.281717 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"72a577eb9b7ce279cb1d93202a80689e2e517349b6d3d88d83b71f8304bd7dd5"} Nov 25 09:44:15 crc kubenswrapper[4769]: I1125 09:44:15.281829 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:44:15 crc kubenswrapper[4769]: I1125 09:44:15.281877 4769 kubelet_node_status.go:401] 
"Setting node annotation to enable volume controller attach/detach" Nov 25 09:44:15 crc kubenswrapper[4769]: I1125 09:44:15.283161 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:15 crc kubenswrapper[4769]: I1125 09:44:15.283252 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:15 crc kubenswrapper[4769]: I1125 09:44:15.283992 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:15 crc kubenswrapper[4769]: I1125 09:44:15.284007 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:15 crc kubenswrapper[4769]: I1125 09:44:15.283931 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:15 crc kubenswrapper[4769]: I1125 09:44:15.284186 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:15 crc kubenswrapper[4769]: W1125 09:44:15.394241 4769 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.201:6443: connect: connection refused Nov 25 09:44:15 crc kubenswrapper[4769]: E1125 09:44:15.394330 4769 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.201:6443: connect: connection refused" logger="UnhandledError" Nov 25 09:44:15 crc kubenswrapper[4769]: I1125 09:44:15.416806 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:44:15 crc kubenswrapper[4769]: I1125 09:44:15.417863 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:15 crc kubenswrapper[4769]: I1125 09:44:15.417918 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:15 crc kubenswrapper[4769]: I1125 09:44:15.417927 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:15 crc kubenswrapper[4769]: I1125 09:44:15.417949 4769 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 09:44:15 crc kubenswrapper[4769]: E1125 09:44:15.418421 4769 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.201:6443: connect: connection refused" node="crc" Nov 25 09:44:15 crc kubenswrapper[4769]: W1125 09:44:15.643926 4769 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.201:6443: connect: connection refused Nov 25 09:44:15 crc kubenswrapper[4769]: E1125 09:44:15.644017 4769 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get 
\"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.201:6443: connect: connection refused" logger="UnhandledError" Nov 25 09:44:15 crc kubenswrapper[4769]: I1125 09:44:15.915377 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 09:44:16 crc kubenswrapper[4769]: I1125 09:44:16.291603 4769 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="e2c14f8876af9710666f03b36306acae7a4af61c305d2e1aac2ce6b414d37fe9" exitCode=0 Nov 25 09:44:16 crc kubenswrapper[4769]: I1125 09:44:16.291727 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"e2c14f8876af9710666f03b36306acae7a4af61c305d2e1aac2ce6b414d37fe9"} Nov 25 09:44:16 crc kubenswrapper[4769]: I1125 09:44:16.291808 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:44:16 crc kubenswrapper[4769]: I1125 09:44:16.293341 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:16 crc kubenswrapper[4769]: I1125 09:44:16.293387 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:16 crc kubenswrapper[4769]: I1125 09:44:16.293396 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:16 crc kubenswrapper[4769]: I1125 09:44:16.297188 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07"} Nov 25 09:44:16 crc kubenswrapper[4769]: I1125 09:44:16.297279 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:44:16 crc kubenswrapper[4769]: I1125 09:44:16.297298 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:44:16 crc kubenswrapper[4769]: I1125 09:44:16.297332 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:44:16 crc kubenswrapper[4769]: I1125 09:44:16.298685 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:16 crc kubenswrapper[4769]: I1125 09:44:16.298764 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:16 crc kubenswrapper[4769]: I1125 09:44:16.298766 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:16 crc kubenswrapper[4769]: I1125 09:44:16.298807 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:16 crc kubenswrapper[4769]: I1125 09:44:16.298830 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:16 crc kubenswrapper[4769]: I1125 09:44:16.298779 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:16 crc kubenswrapper[4769]: I1125 09:44:16.298887 4769 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:16 crc kubenswrapper[4769]: I1125 09:44:16.298951 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:16 crc kubenswrapper[4769]: I1125 09:44:16.298991 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:16 crc kubenswrapper[4769]: I1125 09:44:16.845773 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:44:16 crc kubenswrapper[4769]: I1125 09:44:16.845924 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:44:16 crc kubenswrapper[4769]: I1125 09:44:16.847153 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:16 crc kubenswrapper[4769]: I1125 09:44:16.847177 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:16 crc kubenswrapper[4769]: I1125 09:44:16.847184 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:17 crc kubenswrapper[4769]: I1125 09:44:17.304330 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"ba2c29dce4dc4b1f79f8af2caa9e415b46e19b02e0ba3412ac21f28a33309251"} Nov 25 09:44:17 crc kubenswrapper[4769]: I1125 09:44:17.304388 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"937cee08a2e3eaae95000066a24d21f537fe665a2ecde2f31632a6b33bd51572"} Nov 25 09:44:17 crc kubenswrapper[4769]: I1125 09:44:17.304412 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"62b58345b1b2972c209332a824d83239d52dc4080dc56bc3910201ec6d80ce19"} Nov 25 09:44:17 crc kubenswrapper[4769]: I1125 09:44:17.304430 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"c52f6b6cbd891ac6ba040fc2e27c643e8e48795f82b943dd8b2d1f68744e4bd3"} Nov 25 09:44:17 crc kubenswrapper[4769]: I1125 09:44:17.304453 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:44:17 crc kubenswrapper[4769]: I1125 09:44:17.304518 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:44:17 crc kubenswrapper[4769]: I1125 09:44:17.304569 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:44:17 crc kubenswrapper[4769]: I1125 09:44:17.305635 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:17 crc kubenswrapper[4769]: I1125 09:44:17.305693 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:17 crc kubenswrapper[4769]: I1125 09:44:17.305710 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 
09:44:17 crc kubenswrapper[4769]: I1125 09:44:17.305920 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:17 crc kubenswrapper[4769]: I1125 09:44:17.306028 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:17 crc kubenswrapper[4769]: I1125 09:44:17.306057 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:18 crc kubenswrapper[4769]: I1125 09:44:18.310791 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"427f56858566336ea961bc9f474263907944de8c064e2222c2bfef173a85bd12"} Nov 25 09:44:18 crc kubenswrapper[4769]: I1125 09:44:18.310806 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:44:18 crc kubenswrapper[4769]: I1125 09:44:18.310807 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:44:18 crc kubenswrapper[4769]: I1125 09:44:18.313859 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:18 crc kubenswrapper[4769]: I1125 09:44:18.313922 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:18 crc kubenswrapper[4769]: I1125 09:44:18.313936 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:18 crc kubenswrapper[4769]: I1125 09:44:18.314416 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:18 crc kubenswrapper[4769]: I1125 09:44:18.314448 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:18 crc kubenswrapper[4769]: I1125 09:44:18.314458 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:18 crc kubenswrapper[4769]: I1125 09:44:18.619441 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:44:18 crc kubenswrapper[4769]: I1125 09:44:18.620589 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:18 crc kubenswrapper[4769]: I1125 09:44:18.620633 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:18 crc kubenswrapper[4769]: I1125 09:44:18.620641 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:18 crc kubenswrapper[4769]: I1125 09:44:18.620668 4769 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 09:44:19 crc kubenswrapper[4769]: I1125 09:44:19.313583 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:44:19 crc kubenswrapper[4769]: I1125 09:44:19.314749 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:19 crc kubenswrapper[4769]: I1125 09:44:19.314803 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:19 crc kubenswrapper[4769]: I1125 
09:44:19.314821 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:19 crc kubenswrapper[4769]: I1125 09:44:19.760716 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:44:19 crc kubenswrapper[4769]: I1125 09:44:19.760944 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:44:19 crc kubenswrapper[4769]: I1125 09:44:19.762873 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:19 crc kubenswrapper[4769]: I1125 09:44:19.762946 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:19 crc kubenswrapper[4769]: I1125 09:44:19.762993 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:19 crc kubenswrapper[4769]: I1125 09:44:19.787597 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:44:19 crc kubenswrapper[4769]: I1125 09:44:19.787864 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:44:19 crc kubenswrapper[4769]: I1125 09:44:19.789508 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:19 crc kubenswrapper[4769]: I1125 09:44:19.789552 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:19 crc kubenswrapper[4769]: I1125 09:44:19.789571 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:19 crc kubenswrapper[4769]: I1125 09:44:19.797884 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:44:20 crc kubenswrapper[4769]: I1125 09:44:20.316191 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:44:20 crc kubenswrapper[4769]: I1125 09:44:20.317167 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:20 crc kubenswrapper[4769]: I1125 09:44:20.317194 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:20 crc kubenswrapper[4769]: I1125 09:44:20.317204 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:20 crc kubenswrapper[4769]: I1125 09:44:20.756609 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:44:20 crc kubenswrapper[4769]: I1125 09:44:20.756784 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:44:20 crc kubenswrapper[4769]: I1125 09:44:20.757750 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:20 crc kubenswrapper[4769]: I1125 09:44:20.757785 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:20 crc kubenswrapper[4769]: I1125 09:44:20.757797 4769 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:20 crc kubenswrapper[4769]: I1125 09:44:20.851475 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 09:44:21 crc kubenswrapper[4769]: I1125 09:44:21.319665 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:44:21 crc kubenswrapper[4769]: I1125 09:44:21.321067 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:21 crc kubenswrapper[4769]: I1125 09:44:21.321122 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:21 crc kubenswrapper[4769]: I1125 09:44:21.321134 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:22 crc kubenswrapper[4769]: I1125 09:44:22.080097 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc"
Nov 25 09:44:22 crc kubenswrapper[4769]: I1125 09:44:22.080414 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:44:22 crc kubenswrapper[4769]: I1125 09:44:22.081804 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:22 crc kubenswrapper[4769]: I1125 09:44:22.081848 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:22 crc kubenswrapper[4769]: I1125 09:44:22.081862 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:22 crc kubenswrapper[4769]: E1125 09:44:22.313848 4769 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Nov 25 09:44:23 crc kubenswrapper[4769]: I1125 09:44:23.851549 4769 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 25 09:44:23 crc kubenswrapper[4769]: I1125 09:44:23.851625 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 25 09:44:23 crc kubenswrapper[4769]: I1125 09:44:23.862802 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 09:44:23 crc kubenswrapper[4769]: I1125 09:44:23.862946 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:44:23 crc kubenswrapper[4769]: I1125 09:44:23.863907 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:23 crc kubenswrapper[4769]: I1125 09:44:23.863937 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
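[editor's note: the startup probe above is timing out against https://192.168.126.11:10357/healthz ("context deadline exceeded" once the probe's client timeout elapses). Below is a rough stdlib-only Go sketch of an equivalent manual check; the 5s timeout and the skipped certificate verification (the control-plane serving certs are not in the local trust store) are assumptions for illustration, not the kubelet's actual probe transport.]

// probecheck.go — hypothetical manual healthz check, not kubelet code.
package main

import (
	"crypto/tls"
	"fmt"
	"io"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{
		Timeout: 5 * time.Second, // past this, failures look like the entry above
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	resp, err := client.Get("https://192.168.126.11:10357/healthz")
	if err != nil {
		fmt.Println("probe failed:", err) // e.g. Client.Timeout exceeded while awaiting headers
		return
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(io.LimitReader(resp.Body, 1024))
	fmt.Printf("status=%d body=%q\n", resp.StatusCode, body)
}

[end note; log resumes]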
Nov 25 09:44:23 crc kubenswrapper[4769]: I1125 09:44:23.863955 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:23 crc kubenswrapper[4769]: I1125 09:44:23.866904 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 09:44:24 crc kubenswrapper[4769]: I1125 09:44:24.328906 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 09:44:24 crc kubenswrapper[4769]: I1125 09:44:24.331532 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:24 crc kubenswrapper[4769]: I1125 09:44:24.331600 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:24 crc kubenswrapper[4769]: I1125 09:44:24.331617 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:25 crc kubenswrapper[4769]: I1125 09:44:25.950594 4769 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:55340->192.168.126.11:17697: read: connection reset by peer" start-of-body=
Nov 25 09:44:25 crc kubenswrapper[4769]: I1125 09:44:25.950712 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:55340->192.168.126.11:17697: read: connection reset by peer"
Nov 25 09:44:26 crc kubenswrapper[4769]: I1125 09:44:26.110935 4769 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Nov 25 09:44:26 crc kubenswrapper[4769]: I1125 09:44:26.111033 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Nov 25 09:44:26 crc kubenswrapper[4769]: I1125 09:44:26.115170 4769 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Nov 25 09:44:26 crc kubenswrapper[4769]: I1125 09:44:26.115370 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Nov 25 09:44:26 crc kubenswrapper[4769]: I1125 09:44:26.337608 4769 log.go:25] "Finished parsing log file"
path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 25 09:44:26 crc kubenswrapper[4769]: I1125 09:44:26.339791 4769 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07" exitCode=255 Nov 25 09:44:26 crc kubenswrapper[4769]: I1125 09:44:26.339858 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07"} Nov 25 09:44:26 crc kubenswrapper[4769]: I1125 09:44:26.340083 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:44:26 crc kubenswrapper[4769]: I1125 09:44:26.341227 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:26 crc kubenswrapper[4769]: I1125 09:44:26.341278 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:26 crc kubenswrapper[4769]: I1125 09:44:26.341290 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:26 crc kubenswrapper[4769]: I1125 09:44:26.341986 4769 scope.go:117] "RemoveContainer" containerID="054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07" Nov 25 09:44:26 crc kubenswrapper[4769]: I1125 09:44:26.773489 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Nov 25 09:44:26 crc kubenswrapper[4769]: I1125 09:44:26.773817 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:44:26 crc kubenswrapper[4769]: I1125 09:44:26.775545 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:26 crc kubenswrapper[4769]: I1125 09:44:26.775595 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:26 crc kubenswrapper[4769]: I1125 09:44:26.775609 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:26 crc kubenswrapper[4769]: I1125 09:44:26.828040 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Nov 25 09:44:27 crc kubenswrapper[4769]: I1125 09:44:27.091791 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Nov 25 09:44:27 crc kubenswrapper[4769]: I1125 09:44:27.344913 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 25 09:44:27 crc kubenswrapper[4769]: I1125 09:44:27.347336 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469"} Nov 25 09:44:27 crc kubenswrapper[4769]: I1125 09:44:27.347481 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:44:27 crc kubenswrapper[4769]: I1125 09:44:27.347538 4769 
kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:44:27 crc kubenswrapper[4769]: I1125 09:44:27.348914 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:27 crc kubenswrapper[4769]: I1125 09:44:27.348953 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:27 crc kubenswrapper[4769]: I1125 09:44:27.348984 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:27 crc kubenswrapper[4769]: I1125 09:44:27.348986 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:27 crc kubenswrapper[4769]: I1125 09:44:27.349022 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:27 crc kubenswrapper[4769]: I1125 09:44:27.349037 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:28 crc kubenswrapper[4769]: I1125 09:44:28.350178 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:44:28 crc kubenswrapper[4769]: I1125 09:44:28.352046 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:28 crc kubenswrapper[4769]: I1125 09:44:28.352098 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:28 crc kubenswrapper[4769]: I1125 09:44:28.352113 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:30 crc kubenswrapper[4769]: I1125 09:44:30.763367 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:44:30 crc kubenswrapper[4769]: I1125 09:44:30.763553 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:44:30 crc kubenswrapper[4769]: I1125 09:44:30.763767 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:44:30 crc kubenswrapper[4769]: I1125 09:44:30.764908 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:30 crc kubenswrapper[4769]: I1125 09:44:30.764939 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:30 crc kubenswrapper[4769]: I1125 09:44:30.764948 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:30 crc kubenswrapper[4769]: I1125 09:44:30.768303 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:44:31 crc kubenswrapper[4769]: E1125 09:44:31.110176 4769 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.113213 4769 trace.go:236] Trace[1317917324]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (25-Nov-2025 09:44:20.815) (total 
time: 10297ms): Nov 25 09:44:31 crc kubenswrapper[4769]: Trace[1317917324]: ---"Objects listed" error: 10297ms (09:44:31.113) Nov 25 09:44:31 crc kubenswrapper[4769]: Trace[1317917324]: [10.297592858s] [10.297592858s] END Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.113251 4769 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.113324 4769 trace.go:236] Trace[1495318455]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (25-Nov-2025 09:44:20.408) (total time: 10704ms): Nov 25 09:44:31 crc kubenswrapper[4769]: Trace[1495318455]: ---"Objects listed" error: 10704ms (09:44:31.113) Nov 25 09:44:31 crc kubenswrapper[4769]: Trace[1495318455]: [10.70440739s] [10.70440739s] END Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.113352 4769 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.114177 4769 trace.go:236] Trace[2017464122]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (25-Nov-2025 09:44:21.112) (total time: 10001ms): Nov 25 09:44:31 crc kubenswrapper[4769]: Trace[2017464122]: ---"Objects listed" error: 10001ms (09:44:31.114) Nov 25 09:44:31 crc kubenswrapper[4769]: Trace[2017464122]: [10.001732169s] [10.001732169s] END Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.114198 4769 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 25 09:44:31 crc kubenswrapper[4769]: E1125 09:44:31.115091 4769 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.115241 4769 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.117833 4769 trace.go:236] Trace[2116171042]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (25-Nov-2025 09:44:21.107) (total time: 10010ms): Nov 25 09:44:31 crc kubenswrapper[4769]: Trace[2116171042]: ---"Objects listed" error: 10009ms (09:44:31.117) Nov 25 09:44:31 crc kubenswrapper[4769]: Trace[2116171042]: [10.010087637s] [10.010087637s] END Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.117862 4769 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.176124 4769 apiserver.go:52] "Watching apiserver" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.178902 4769 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.179175 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf"] Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.179565 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.179689 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.179738 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:44:31 crc kubenswrapper[4769]: E1125 09:44:31.179943 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.180060 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.180359 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 09:44:31 crc kubenswrapper[4769]: E1125 09:44:31.180432 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.180514 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:44:31 crc kubenswrapper[4769]: E1125 09:44:31.180660 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.182346 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.182381 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.182855 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.183077 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.183150 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.183485 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.184406 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.184549 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.184628 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.214373 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.215591 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.215648 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.215667 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.215684 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.215709 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.215730 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.215748 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: 
\"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.215765 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.215781 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.215811 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.215830 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.215852 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.217044 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.217069 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.217648 4769 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. 
Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.220594 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.226614 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.226919 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.229846 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:44:31 crc kubenswrapper[4769]: E1125 09:44:31.230389 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:44:31 crc kubenswrapper[4769]: E1125 09:44:31.230419 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:44:31 crc kubenswrapper[4769]: E1125 09:44:31.230437 4769 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:44:31 crc kubenswrapper[4769]: E1125 09:44:31.230518 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 09:44:31.730491555 +0000 UTC m=+20.315463868 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.234926 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.236183 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 09:44:31 crc kubenswrapper[4769]: E1125 09:44:31.237197 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:44:31 crc kubenswrapper[4769]: E1125 09:44:31.237309 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:44:31 crc kubenswrapper[4769]: E1125 09:44:31.237390 4769 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:44:31 crc kubenswrapper[4769]: E1125 09:44:31.237519 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 09:44:31.737498587 +0000 UTC m=+20.322471060 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.238562 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.244614 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.258037 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.272992 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.280403 4769 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.282956 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.292448 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.316187 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.316252 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.316619 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.316672 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.316701 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.316725 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.316760 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.316778 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.316795 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.316811 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.316867 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.317337 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.317533 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.317614 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.317638 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.317615 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.317751 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.317662 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.317780 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.317831 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.317855 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.317876 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.317897 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.317897 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.317915 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.317993 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318013 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318044 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318069 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318097 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318129 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318152 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318172 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318189 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318205 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318198 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318296 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318320 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318342 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318363 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318384 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318393 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318401 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318423 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318443 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318461 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318484 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318481 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318502 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318523 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318509 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318544 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318564 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318581 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318598 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318615 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318635 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318652 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318675 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318692 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318711 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318729 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318766 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318788 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318798 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318782 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318862 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318892 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318905 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318925 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318976 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318997 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319209 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319249 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319266 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319284 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319301 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319319 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319336 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: 
\"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319351 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319367 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319385 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319401 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319420 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319439 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319458 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319476 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319495 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319511 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: 
\"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319531 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319550 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319567 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319585 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319605 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319627 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319647 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319666 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319719 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319746 4769 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319764 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319814 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319833 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319853 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319899 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319915 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319946 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319978 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319995 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320014 4769 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320033 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320053 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320071 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320088 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320105 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320124 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320140 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320157 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320175 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 09:44:31 crc kubenswrapper[4769]: 
I1125 09:44:31.320191 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320207 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320224 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320240 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320261 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320294 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320318 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320340 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320366 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320394 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 
25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320419 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320436 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320462 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320479 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320495 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320511 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320529 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320545 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320563 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320582 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: 
\"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320597 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320613 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320631 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320649 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320669 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320688 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320705 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320722 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320741 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320757 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: 
\"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320774 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320792 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320809 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320886 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320907 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320927 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320945 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320983 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321002 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321021 4769 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321039 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321056 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321073 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321136 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321171 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321201 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321221 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321238 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321262 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 25 09:44:31 crc 
kubenswrapper[4769]: I1125 09:44:31.321279 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321297 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321314 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321333 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321352 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321371 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321390 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321410 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321427 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321443 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: 
\"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321462 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321478 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321495 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321538 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321557 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321590 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321612 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321632 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321650 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321668 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: 
\"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321687 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321703 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321719 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321741 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321762 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321779 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321798 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321815 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321834 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321853 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: 
\"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321872 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321891 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321909 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321926 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321944 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321978 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321999 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.322016 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.322035 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.322054 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" 
(UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.322073 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.322092 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.322109 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.322125 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.322269 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.322306 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.322331 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.322385 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.322450 4769 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Nov 
25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.322463 4769 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.322474 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.322484 4769 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.322496 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.322506 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.322517 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.322527 4769 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.322537 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.322548 4769 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.322558 4769 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.322569 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.322581 4769 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.322593 4769 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.322603 4769 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.322614 4769 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.318925 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.323123 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319035 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319164 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319185 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319410 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319420 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319520 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319603 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319694 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319715 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.319841 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320768 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.320875 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321092 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321383 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321484 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321570 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321598 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321618 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321776 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321822 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321923 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.321941 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.322046 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.322159 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.322181 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.322331 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.322585 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.322712 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.322203 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.322802 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.322862 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.322798 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.323371 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.323106 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.323147 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.323902 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.323931 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.324206 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.324261 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.324313 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.324442 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.324552 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.324561 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.324770 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.324945 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.325257 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.325409 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.325841 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.326049 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.326252 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.326275 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.326420 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.326730 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.326770 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.326813 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: E1125 09:44:31.327119 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:44:31.827092837 +0000 UTC m=+20.412065150 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.327228 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.327255 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.327277 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.327325 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.327501 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.327874 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.328041 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.328390 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.328422 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.328575 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.328602 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.329071 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.329162 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.329218 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.329494 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.329556 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.329809 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.330042 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.330075 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.329765 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.330192 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.330512 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.330606 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.330670 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.330201 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.330923 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.331132 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.331198 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.331219 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.331233 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.331266 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.331433 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.331453 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.330322 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.331736 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.332102 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.332149 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.332368 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.332405 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.332646 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.333011 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.333052 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.333358 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.333431 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.333542 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.333702 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.333740 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.333820 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.333863 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.334042 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.334077 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.334113 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.334178 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.334183 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.334283 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.334578 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.334658 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.334752 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.335268 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.335298 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.335446 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.335590 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: E1125 09:44:31.335631 4769 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.335673 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 25 09:44:31 crc kubenswrapper[4769]: E1125 09:44:31.335721 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:44:31.835696152 +0000 UTC m=+20.420668465 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Nov 25 09:44:31 crc kubenswrapper[4769]: E1125 09:44:31.335840 4769 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.336632 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: E1125 09:44:31.336762 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:44:31.835905478 +0000 UTC m=+20.420877811 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.336860 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.336919 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.337081 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.337340 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.337655 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.338043 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.338154 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.338543 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.338565 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.338720 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.338953 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.339156 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.339040 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.339404 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.339594 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert".
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.339866 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.339918 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.339976 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.340386 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.340577 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.340977 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.341207 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.341354 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). 
InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.341564 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.341934 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.342068 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.343623 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.343987 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.344219 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.344295 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.344449 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.344815 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.344842 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.344936 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.345034 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.345680 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.345852 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.346207 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.346639 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.345681 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.346855 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.346829 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.346980 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.347079 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.347104 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.347170 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.347255 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.347573 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.347768 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.347909 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.347941 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.347884 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.348095 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.348834 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.348927 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.377579 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.385300 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.393574 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.397188 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.404711 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.424070 4769 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.424381 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.424494 4769 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.424550 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.424601 4769 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.424660 4769 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.424711 4769 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.424760 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.424817 4769 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.424871 4769 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.424927 4769 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.425016 4769 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.425072 4769 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" 
DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.425123 4769 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.425173 4769 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.425225 4769 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.425357 4769 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.425411 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.425482 4769 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.425537 4769 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.425594 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.425665 4769 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.425726 4769 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.425777 4769 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.425828 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.425878 4769 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.425937 4769 reconciler_common.go:293] 
"Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.426015 4769 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.426077 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.426128 4769 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.426179 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.426229 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.426279 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.426336 4769 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.426400 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.426464 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.426517 4769 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.426568 4769 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.426618 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.426673 4769 reconciler_common.go:293] 
"Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.426731 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.426793 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.426846 4769 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.426896 4769 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.426956 4769 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.427039 4769 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.427093 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.427143 4769 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.427194 4769 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.427242 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.427297 4769 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.427352 4769 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.427410 4769 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.427460 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.427511 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.427571 4769 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.427632 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.427685 4769 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.427738 4769 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.427789 4769 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.427842 4769 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.427896 4769 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.427982 4769 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.428046 4769 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.428099 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.428148 4769 
reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.428197 4769 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.428256 4769 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.428308 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.428357 4769 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.428409 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.428457 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.428512 4769 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.428568 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.428622 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.428670 4769 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.428719 4769 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.428776 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: 
I1125 09:44:31.428836 4769 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.428911 4769 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.429003 4769 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.429091 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.429167 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.429249 4769 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.429337 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.429413 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.429486 4769 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.429548 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.429600 4769 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.429652 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.429708 4769 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.429771 4769 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.429821 4769 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.429871 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.429932 4769 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.430022 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.430086 4769 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.430137 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.430193 4769 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.430250 4769 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.430301 4769 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.430354 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.430411 4769 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.430464 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.430512 4769 reconciler_common.go:293] 
"Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.430561 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.430608 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.430663 4769 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.430718 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.430770 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.430820 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.430875 4769 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.430929 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.430998 4769 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.431059 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.431111 4769 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.431162 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.431210 4769 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.431269 4769 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.431321 4769 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.431371 4769 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.431429 4769 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.431480 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.431528 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.431586 4769 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.431635 4769 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.431689 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.431740 4769 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.431789 4769 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.431843 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.431894 4769 
reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.431944 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.432055 4769 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.432111 4769 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.432162 4769 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.432217 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.432268 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.432320 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.432374 4769 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.432440 4769 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.432501 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.432556 4769 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.432606 4769 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 
09:44:31.432656 4769 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.432705 4769 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.432764 4769 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.432819 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.432872 4769 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.432929 4769 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.432997 4769 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.433055 4769 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.433107 4769 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.433168 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.433221 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.433277 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.433331 4769 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on 
node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.433381 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.433437 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.433527 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.433594 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.433646 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.433696 4769 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.433753 4769 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.433809 4769 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.433861 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.433910 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.434030 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.434095 4769 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.434147 4769 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.434207 4769 
reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.434259 4769 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.434309 4769 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.434373 4769 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.434466 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.434522 4769 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.434571 4769 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.434623 4769 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.434676 4769 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.499607 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.511792 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 09:44:31 crc kubenswrapper[4769]: W1125 09:44:31.515499 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-97ced262a9a8d4c7e5beab365ef20fc34ac53cdf7eb3ce2b8896e173d7810fb7 WatchSource:0}: Error finding container 97ced262a9a8d4c7e5beab365ef20fc34ac53cdf7eb3ce2b8896e173d7810fb7: Status 404 returned error can't find the container with id 97ced262a9a8d4c7e5beab365ef20fc34ac53cdf7eb3ce2b8896e173d7810fb7 Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.520722 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 09:44:31 crc kubenswrapper[4769]: W1125 09:44:31.525369 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-c9e31c4fc5aeb9ff7a2bfa3121bd099bf13e7eb3d4f42add8da0398740bb79aa WatchSource:0}: Error finding container c9e31c4fc5aeb9ff7a2bfa3121bd099bf13e7eb3d4f42add8da0398740bb79aa: Status 404 returned error can't find the container with id c9e31c4fc5aeb9ff7a2bfa3121bd099bf13e7eb3d4f42add8da0398740bb79aa Nov 25 09:44:31 crc kubenswrapper[4769]: W1125 09:44:31.541852 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-f89cb2af012524609bc73becc55cef4005f0e7c0c44b1528c2712482a29bd4ae WatchSource:0}: Error finding container f89cb2af012524609bc73becc55cef4005f0e7c0c44b1528c2712482a29bd4ae: Status 404 returned error can't find the container with id f89cb2af012524609bc73becc55cef4005f0e7c0c44b1528c2712482a29bd4ae Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.737498 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:44:31 crc kubenswrapper[4769]: E1125 09:44:31.737642 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:44:31 crc kubenswrapper[4769]: E1125 09:44:31.737667 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:44:31 crc kubenswrapper[4769]: E1125 09:44:31.737679 4769 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:44:31 crc kubenswrapper[4769]: E1125 09:44:31.737730 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 09:44:32.737716644 +0000 UTC m=+21.322688947 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.839037 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.839329 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.839439 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:44:31 crc kubenswrapper[4769]: E1125 09:44:31.839496 4769 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:44:31 crc kubenswrapper[4769]: E1125 09:44:31.839524 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:44:32.839465746 +0000 UTC m=+21.424438059 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.839602 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:44:31 crc kubenswrapper[4769]: E1125 09:44:31.839727 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:44:31 crc kubenswrapper[4769]: E1125 09:44:31.839782 4769 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:44:31 crc kubenswrapper[4769]: E1125 09:44:31.839787 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:44:31 crc kubenswrapper[4769]: E1125 09:44:31.839821 4769 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:44:31 crc kubenswrapper[4769]: E1125 09:44:31.839894 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:44:32.83962 +0000 UTC m=+21.424592353 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:44:31 crc kubenswrapper[4769]: E1125 09:44:31.839948 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:44:32.839927698 +0000 UTC m=+21.424900141 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:44:31 crc kubenswrapper[4769]: E1125 09:44:31.840038 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 09:44:32.840016301 +0000 UTC m=+21.424988664 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.972599 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.984583 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.986048 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Nov 25 09:44:31 crc kubenswrapper[4769]: I1125 09:44:31.989185 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.003024 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.016998 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.031551 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbf363f-faf3-45da-a19f-54cc0119825c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:44:25Z\\\",\\\"message\\\":\\\"W1125 09:44:15.399269 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:44:15.400911 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063855 cert, and key in /tmp/serving-cert-2102574939/serving-signer.crt, /tmp/serving-cert-2102574939/serving-signer.key\\\\nI1125 09:44:15.634430 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:44:15.637907 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:44:15.638219 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:44:15.639555 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2102574939/tls.crt::/tmp/serving-cert-2102574939/tls.key\\\\\\\"\\\\nF1125 09:44:25.945399 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.047744 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.059098 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.070645 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.081621 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c58a9381-bd89-4fd9-8217-f04646879968\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cd58c1df5cc30ce98fc70b6117deafa5ebeb2d1a7d7b481b62c38ab39c9290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8b504d3d81cc7b0e4741363b8a9e4c0ea3a0c1af959c8b3def222b44a646b22b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fad541e8dbab3802d7acafd3a70a54f992d4578319dc441c41ffdffdd635d63\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.099596 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.123121 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.134794 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbf363f-faf3-45da-a19f-54cc0119825c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:44:25Z\\\",\\\"message\\\":\\\"W1125 09:44:15.399269 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
09:44:15.400911 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063855 cert, and key in /tmp/serving-cert-2102574939/serving-signer.crt, /tmp/serving-cert-2102574939/serving-signer.key\\\\nI1125 09:44:15.634430 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:44:15.637907 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:44:15.638219 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:44:15.639555 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2102574939/tls.crt::/tmp/serving-cert-2102574939/tls.key\\\\\\\"\\\\nF1125 09:44:25.945399 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.147583 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.162906 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.173804 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.185425 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.242747 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.243477 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.245252 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.246012 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.247298 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.248078 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.249379 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.251924 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.254241 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.255857 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.256605 4769 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.258151 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.259709 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.261253 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c58a9381-bd89-4fd9-8217-f04646879968\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cd58c1df5cc30ce98fc70b6117deafa5ebeb2d1a7d7b481b62c38ab39c9290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8b504d3d81cc7b0e4741363b8a9e4c0ea3a0c1af959c8b3def222b44a646b22b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift
-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fad541e8dbab3802d7acafd3a70a54f992d4578319dc441c41ffdffdd635d63\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.261457 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.262864 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.270733 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.271743 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.272283 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: 
I1125 09:44:32.272888 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.273534 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.274073 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.274819 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.275285 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.275656 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.276070 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.276615 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.277459 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.278303 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.278825 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.279575 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.283590 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.284316 4769 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.284465 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.286930 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.288073 
4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.288657 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.290914 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.291871 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.292395 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.293066 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.294202 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.300026 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.300816 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.302027 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.302731 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.303727 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.304232 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.305280 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.305800 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod 
volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.306879 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.307463 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.308479 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.309030 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.309674 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.310659 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.311162 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.316189 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.331840 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.346115 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.360080 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.365574 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"e477c71aa0332cf903853803df3a7cfda7c251a749b3f3d9226e0b6edbd34a57"} Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.365639 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"f89cb2af012524609bc73becc55cef4005f0e7c0c44b1528c2712482a29bd4ae"} Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.367421 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"c9e31c4fc5aeb9ff7a2bfa3121bd099bf13e7eb3d4f42add8da0398740bb79aa"} Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.369766 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"89fbb83e7cad0ae1f970669d2cdb9bf9535d5b8e5ccec1962faa377fe263cc17"} Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.369853 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"ddfc773720803f36e40054cc984d270f6ff699d660c66491adb65338a52e00e1"} Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.369876 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"97ced262a9a8d4c7e5beab365ef20fc34ac53cdf7eb3ce2b8896e173d7810fb7"} Nov 25 09:44:32 crc kubenswrapper[4769]: E1125 09:44:32.378777 4769 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"kube-controller-manager-crc\" already exists" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 09:44:32 crc kubenswrapper[4769]: E1125 09:44:32.379486 4769 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"kube-apiserver-crc\" already exists" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 
09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.380258 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbf363f-faf3-45da-a19f-54cc0119825c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\
":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:44:25Z\\\",\\\"message\\\":\\\"W1125 09:44:15.399269 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:44:15.400911 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063855 cert, and key in /tmp/serving-cert-2102574939/serving-signer.crt, /tmp/serving-cert-2102574939/serving-signer.key\\\\nI1125 09:44:15.634430 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:44:15.637907 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:44:15.638219 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:44:15.639555 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2102574939/tls.crt::/tmp/serving-cert-2102574939/tls.key\\\\\\\"\\\\nF1125 09:44:25.945399 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.397623 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c58a9381-bd89-4fd9-8217-f04646879968\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cd58c1df5cc30ce98fc70b6117deafa5ebeb2d1a7d7b481b62c38ab39c9290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8b504d3d81cc7b0e4741363b8a9e4c0ea3a0c1af959c8b3def222b44a646b22b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fad541e8dbab3802d7acafd3a70a54f992d4578319dc441c41ffdffdd635d63\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.422431 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.450458 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.468546 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbf363f-faf3-45da-a19f-54cc0119825c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:44:25Z\\\",\\\"message\\\":\\\"W1125 09:44:15.399269 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
09:44:15.400911 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063855 cert, and key in /tmp/serving-cert-2102574939/serving-signer.crt, /tmp/serving-cert-2102574939/serving-signer.key\\\\nI1125 09:44:15.634430 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:44:15.637907 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:44:15.638219 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:44:15.639555 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2102574939/tls.crt::/tmp/serving-cert-2102574939/tls.key\\\\\\\"\\\\nF1125 09:44:25.945399 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.490133 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89fbb83e7cad0ae1f970669d2cdb9bf9535d5b8e5ccec1962faa377fe263cc17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddfc773720803f36e40054cc984d270f6ff699d660c66491adb65338a52e00e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2025-11-25T09:44:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.505908 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e477c71aa0332cf903853803df3a7cfda7c251a749b3f3d9226e0b6edbd34a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.517280 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.533077 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.753388 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:44:32 crc kubenswrapper[4769]: E1125 09:44:32.753606 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:44:32 crc kubenswrapper[4769]: E1125 09:44:32.753631 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:44:32 crc kubenswrapper[4769]: E1125 09:44:32.753648 4769 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:44:32 crc kubenswrapper[4769]: E1125 09:44:32.753720 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 09:44:34.753698622 +0000 UTC m=+23.338670935 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.853991 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.854115 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.854154 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:44:32 crc kubenswrapper[4769]: I1125 09:44:32.854190 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:44:32 crc kubenswrapper[4769]: E1125 09:44:32.854336 4769 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:44:32 crc kubenswrapper[4769]: E1125 09:44:32.854367 4769 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:44:32 crc kubenswrapper[4769]: E1125 09:44:32.854465 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:44:34.854439227 +0000 UTC m=+23.439411550 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:44:32 crc kubenswrapper[4769]: E1125 09:44:32.854478 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:44:32 crc kubenswrapper[4769]: E1125 09:44:32.854551 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:44:32 crc kubenswrapper[4769]: E1125 09:44:32.854568 4769 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:44:32 crc kubenswrapper[4769]: E1125 09:44:32.854502 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:44:34.854490418 +0000 UTC m=+23.439462731 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:44:32 crc kubenswrapper[4769]: E1125 09:44:32.854654 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 09:44:34.854629512 +0000 UTC m=+23.439601825 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:44:32 crc kubenswrapper[4769]: E1125 09:44:32.855072 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:44:34.855031123 +0000 UTC m=+23.440003566 (durationBeforeRetry 2s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:44:33 crc kubenswrapper[4769]: I1125 09:44:33.236003 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:44:33 crc kubenswrapper[4769]: I1125 09:44:33.236044 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:44:33 crc kubenswrapper[4769]: I1125 09:44:33.236096 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:44:33 crc kubenswrapper[4769]: E1125 09:44:33.236221 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:44:33 crc kubenswrapper[4769]: E1125 09:44:33.236381 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:44:33 crc kubenswrapper[4769]: E1125 09:44:33.236530 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:44:34 crc kubenswrapper[4769]: I1125 09:44:34.771091 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:44:34 crc kubenswrapper[4769]: E1125 09:44:34.771268 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:44:34 crc kubenswrapper[4769]: E1125 09:44:34.771293 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:44:34 crc kubenswrapper[4769]: E1125 09:44:34.771308 4769 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:44:34 crc kubenswrapper[4769]: E1125 09:44:34.771369 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 09:44:38.771353577 +0000 UTC m=+27.356325900 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:44:34 crc kubenswrapper[4769]: I1125 09:44:34.872462 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:44:34 crc kubenswrapper[4769]: I1125 09:44:34.872578 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:44:34 crc kubenswrapper[4769]: I1125 09:44:34.872619 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:44:34 crc kubenswrapper[4769]: I1125 09:44:34.872651 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:44:34 crc kubenswrapper[4769]: E1125 09:44:34.872816 4769 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:44:34 crc kubenswrapper[4769]: E1125 09:44:34.872894 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:44:38.872872263 +0000 UTC m=+27.457844576 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:44:34 crc kubenswrapper[4769]: E1125 09:44:34.872958 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:44:38.872952245 +0000 UTC m=+27.457924558 (durationBeforeRetry 4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:44:34 crc kubenswrapper[4769]: E1125 09:44:34.873143 4769 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:44:34 crc kubenswrapper[4769]: E1125 09:44:34.873184 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:44:38.873170751 +0000 UTC m=+27.458143064 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:44:34 crc kubenswrapper[4769]: E1125 09:44:34.873291 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:44:34 crc kubenswrapper[4769]: E1125 09:44:34.873360 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:44:34 crc kubenswrapper[4769]: E1125 09:44:34.873384 4769 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:44:34 crc kubenswrapper[4769]: E1125 09:44:34.873485 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 09:44:38.873457419 +0000 UTC m=+27.458429762 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:44:35 crc kubenswrapper[4769]: I1125 09:44:35.236318 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:44:35 crc kubenswrapper[4769]: I1125 09:44:35.236345 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:44:35 crc kubenswrapper[4769]: E1125 09:44:35.236494 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:44:35 crc kubenswrapper[4769]: I1125 09:44:35.236273 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:44:35 crc kubenswrapper[4769]: E1125 09:44:35.236651 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:44:35 crc kubenswrapper[4769]: E1125 09:44:35.236778 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:44:35 crc kubenswrapper[4769]: I1125 09:44:35.380354 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"f2bfc42c354557e335b5586b8aae6051c137143fdd2419a04802375587244df0"} Nov 25 09:44:35 crc kubenswrapper[4769]: I1125 09:44:35.408646 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c58a9381-bd89-4fd9-8217-f04646879968\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cd58c1df5cc30ce98fc70b6117deafa5ebeb2d1a7d7b481b62c38ab39c9290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8b504d3d81cc7b0e4741363b8a9e4c0ea3a0c1af959c8b3def222b44a646b22b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fad541e8dbab3802d7acafd3a70a54f992d4578319dc441c41ffdffdd635d63\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:35 crc kubenswrapper[4769]: I1125 09:44:35.444635 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:35 crc kubenswrapper[4769]: I1125 09:44:35.466851 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:35 crc kubenswrapper[4769]: I1125 09:44:35.488705 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbf363f-faf3-45da-a19f-54cc0119825c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:44:25Z\\\",\\\"message\\\":\\\"W1125 09:44:15.399269 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
09:44:15.400911 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063855 cert, and key in /tmp/serving-cert-2102574939/serving-signer.crt, /tmp/serving-cert-2102574939/serving-signer.key\\\\nI1125 09:44:15.634430 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:44:15.637907 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:44:15.638219 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:44:15.639555 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2102574939/tls.crt::/tmp/serving-cert-2102574939/tls.key\\\\\\\"\\\\nF1125 09:44:25.945399 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:35 crc kubenswrapper[4769]: I1125 09:44:35.506369 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89fbb83e7cad0ae1f970669d2cdb9bf9535d5b8e5ccec1962faa377fe263cc17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddfc773720803f36e40054cc984d270f6ff699d660c66491adb65338a52e00e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2025-11-25T09:44:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:35 crc kubenswrapper[4769]: I1125 09:44:35.526365 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e477c71aa0332cf903853803df3a7cfda7c251a749b3f3d9226e0b6edbd34a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:35 crc kubenswrapper[4769]: I1125 09:44:35.544224 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2bfc42c354557e335b5586b8aae6051c137143fdd2419a04802375587244df0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:35 crc kubenswrapper[4769]: I1125 09:44:35.561121 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.235864 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.235918 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.235930 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:44:37 crc kubenswrapper[4769]: E1125 09:44:37.236060 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:44:37 crc kubenswrapper[4769]: E1125 09:44:37.236155 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:44:37 crc kubenswrapper[4769]: E1125 09:44:37.236211 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.515665 4769 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.518239 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.518298 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.518317 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.518400 4769 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.525274 4769 kubelet_node_status.go:115] "Node was previously registered" node="crc" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.525684 4769 kubelet_node_status.go:79] "Successfully registered node" node="crc" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.527155 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.527179 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.527191 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.527211 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.527224 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:37Z","lastTransitionTime":"2025-11-25T09:44:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:37 crc kubenswrapper[4769]: E1125 09:44:37.552202 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2e5db0ee-b4dd-41e7-b399-6918209aec97\\\",\\\"systemUUID\\\":\\\"fb8d0e0c-b27f-49f2-81a7-fe75c397959e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:37Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.556345 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.556398 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.556410 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.556429 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.556442 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:37Z","lastTransitionTime":"2025-11-25T09:44:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:37 crc kubenswrapper[4769]: E1125 09:44:37.571885 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2e5db0ee-b4dd-41e7-b399-6918209aec97\\\",\\\"systemUUID\\\":\\\"fb8d0e0c-b27f-49f2-81a7-fe75c397959e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:37Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.575844 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.575892 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.575902 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.575918 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.575930 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:37Z","lastTransitionTime":"2025-11-25T09:44:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:37 crc kubenswrapper[4769]: E1125 09:44:37.588382 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2e5db0ee-b4dd-41e7-b399-6918209aec97\\\",\\\"systemUUID\\\":\\\"fb8d0e0c-b27f-49f2-81a7-fe75c397959e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:37Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.595852 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.595922 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.595943 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.596003 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.596019 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:37Z","lastTransitionTime":"2025-11-25T09:44:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:37 crc kubenswrapper[4769]: E1125 09:44:37.613262 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2e5db0ee-b4dd-41e7-b399-6918209aec97\\\",\\\"systemUUID\\\":\\\"fb8d0e0c-b27f-49f2-81a7-fe75c397959e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:37Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.625478 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.625529 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.625542 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.625562 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.625579 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:37Z","lastTransitionTime":"2025-11-25T09:44:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:37 crc kubenswrapper[4769]: E1125 09:44:37.654173 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2e5db0ee-b4dd-41e7-b399-6918209aec97\\\",\\\"systemUUID\\\":\\\"fb8d0e0c-b27f-49f2-81a7-fe75c397959e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:37Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:37 crc kubenswrapper[4769]: E1125 09:44:37.654291 4769 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.655826 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.655857 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.655870 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.655887 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.655897 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:37Z","lastTransitionTime":"2025-11-25T09:44:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.733126 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-kzpxc"] Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.734153 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-kzpxc" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.736515 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.737106 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.738406 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.755219 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c58a9381-bd89-4fd9-8217-f04646879968\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cd58c1df5cc30ce98fc70b6117deafa5ebeb2d1a7d7b481b62c38ab39c9290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8b504d3d81cc7b0e4741363b8a9e4c0ea3a0c1af959c8b3def222b44a646b22b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fad541e8dbab3802d7acafd3a70a54f992d4578319dc441c41ffdffdd635d63\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:37Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.758594 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.758629 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.758639 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.758657 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.758671 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:37Z","lastTransitionTime":"2025-11-25T09:44:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.771003 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:37Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.786549 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:37Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.805138 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbf363f-faf3-45da-a19f-54cc0119825c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:44:25Z\\\",\\\"message\\\":\\\"W1125 09:44:15.399269 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
09:44:15.400911 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063855 cert, and key in /tmp/serving-cert-2102574939/serving-signer.crt, /tmp/serving-cert-2102574939/serving-signer.key\\\\nI1125 09:44:15.634430 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:44:15.637907 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:44:15.638219 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:44:15.639555 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2102574939/tls.crt::/tmp/serving-cert-2102574939/tls.key\\\\\\\"\\\\nF1125 09:44:25.945399 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:37Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.817693 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89fbb83e7cad0ae1f970669d2cdb9bf9535d5b8e5ccec1962faa377fe263cc17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddfc773720803f36e40054cc984d270f6ff699d660c66491adb65338a52e00e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2025-11-25T09:44:37Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.831634 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e477c71aa0332cf903853803df3a7cfda7c251a749b3f3d9226e0b6edbd34a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:37Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.847009 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2bfc42c354557e335b5586b8aae6051c137143fdd2419a04802375587244df0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:37Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.859273 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:37Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.860740 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.860772 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.860784 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.860799 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.860815 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:37Z","lastTransitionTime":"2025-11-25T09:44:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.872988 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kzpxc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb3a206d-ee72-415c-bc23-2e1b2d6f8592\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv2k5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:37Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kzpxc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:37Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.899389 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/cb3a206d-ee72-415c-bc23-2e1b2d6f8592-hosts-file\") pod \"node-resolver-kzpxc\" (UID: \"cb3a206d-ee72-415c-bc23-2e1b2d6f8592\") " pod="openshift-dns/node-resolver-kzpxc" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.899495 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jv2k5\" (UniqueName: \"kubernetes.io/projected/cb3a206d-ee72-415c-bc23-2e1b2d6f8592-kube-api-access-jv2k5\") pod \"node-resolver-kzpxc\" (UID: \"cb3a206d-ee72-415c-bc23-2e1b2d6f8592\") " pod="openshift-dns/node-resolver-kzpxc" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.963651 4769 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.963701 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.963711 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.963733 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:37 crc kubenswrapper[4769]: I1125 09:44:37.963746 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:37Z","lastTransitionTime":"2025-11-25T09:44:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.000200 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/cb3a206d-ee72-415c-bc23-2e1b2d6f8592-hosts-file\") pod \"node-resolver-kzpxc\" (UID: \"cb3a206d-ee72-415c-bc23-2e1b2d6f8592\") " pod="openshift-dns/node-resolver-kzpxc" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.000294 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jv2k5\" (UniqueName: \"kubernetes.io/projected/cb3a206d-ee72-415c-bc23-2e1b2d6f8592-kube-api-access-jv2k5\") pod \"node-resolver-kzpxc\" (UID: \"cb3a206d-ee72-415c-bc23-2e1b2d6f8592\") " pod="openshift-dns/node-resolver-kzpxc" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.000778 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/cb3a206d-ee72-415c-bc23-2e1b2d6f8592-hosts-file\") pod \"node-resolver-kzpxc\" (UID: \"cb3a206d-ee72-415c-bc23-2e1b2d6f8592\") " pod="openshift-dns/node-resolver-kzpxc" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.025287 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jv2k5\" (UniqueName: \"kubernetes.io/projected/cb3a206d-ee72-415c-bc23-2e1b2d6f8592-kube-api-access-jv2k5\") pod \"node-resolver-kzpxc\" (UID: \"cb3a206d-ee72-415c-bc23-2e1b2d6f8592\") " pod="openshift-dns/node-resolver-kzpxc" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.052220 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-kzpxc" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.066594 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.066644 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.066656 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.066675 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.066686 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:38Z","lastTransitionTime":"2025-11-25T09:44:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.132027 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-98mzt"] Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.133065 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.133727 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-s47tv"] Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.134181 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-27dbp"] Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.134923 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.134940 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-27dbp" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.138359 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.138466 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.138518 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.138556 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.138799 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.138817 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.138807 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.138919 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.141009 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.141231 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.141380 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.143260 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.164233 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.171360 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.171399 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.171412 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.171431 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.171444 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:38Z","lastTransitionTime":"2025-11-25T09:44:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.180315 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.197101 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d58c71b5-5dc4-45c1-9b58-9740a35d2256\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-98mzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.201923 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vmtw6\" (UniqueName: \"kubernetes.io/projected/d58c71b5-5dc4-45c1-9b58-9740a35d2256-kube-api-access-vmtw6\") pod \"machine-config-daemon-98mzt\" (UID: \"d58c71b5-5dc4-45c1-9b58-9740a35d2256\") " pod="openshift-machine-config-operator/machine-config-daemon-98mzt" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.202035 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d58c71b5-5dc4-45c1-9b58-9740a35d2256-mcd-auth-proxy-config\") pod \"machine-config-daemon-98mzt\" (UID: \"d58c71b5-5dc4-45c1-9b58-9740a35d2256\") " pod="openshift-machine-config-operator/machine-config-daemon-98mzt" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.202065 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: 
\"kubernetes.io/host-path/d58c71b5-5dc4-45c1-9b58-9740a35d2256-rootfs\") pod \"machine-config-daemon-98mzt\" (UID: \"d58c71b5-5dc4-45c1-9b58-9740a35d2256\") " pod="openshift-machine-config-operator/machine-config-daemon-98mzt" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.202081 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d58c71b5-5dc4-45c1-9b58-9740a35d2256-proxy-tls\") pod \"machine-config-daemon-98mzt\" (UID: \"d58c71b5-5dc4-45c1-9b58-9740a35d2256\") " pod="openshift-machine-config-operator/machine-config-daemon-98mzt" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.217918 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c58a9381-bd89-4fd9-8217-f04646879968\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cd58c1df5cc30ce98fc70b6117deafa5ebeb2d1a7d7b481b62c38ab39c9290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8b504d3d81cc7b0e4741363b8a9e4c0ea3a0c1af959c8b3def222b44a646b22b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-o
perator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fad541e8dbab3802d7acafd3a70a54f992d4578319dc441c41ffdffdd635d63\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.262933 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e477c71aa0332cf903853803df3a7cfda7c251a749b3f3d9226e0b6edbd34a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.273567 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.273810 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.273882 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.273981 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.274063 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:38Z","lastTransitionTime":"2025-11-25T09:44:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.303389 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/025219f0-bc69-4a33-acaa-b055607272bb-system-cni-dir\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.303426 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/025219f0-bc69-4a33-acaa-b055607272bb-os-release\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.303446 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/025219f0-bc69-4a33-acaa-b055607272bb-host-run-multus-certs\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.303462 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/025219f0-bc69-4a33-acaa-b055607272bb-etc-kubernetes\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.303481 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/46575153-4800-4ed2-8aa3-b66b98a9c899-os-release\") pod \"multus-additional-cni-plugins-27dbp\" (UID: \"46575153-4800-4ed2-8aa3-b66b98a9c899\") " pod="openshift-multus/multus-additional-cni-plugins-27dbp" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.303498 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/025219f0-bc69-4a33-acaa-b055607272bb-multus-socket-dir-parent\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.303533 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/025219f0-bc69-4a33-acaa-b055607272bb-cni-binary-copy\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.303574 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/46575153-4800-4ed2-8aa3-b66b98a9c899-cni-binary-copy\") pod \"multus-additional-cni-plugins-27dbp\" (UID: \"46575153-4800-4ed2-8aa3-b66b98a9c899\") " pod="openshift-multus/multus-additional-cni-plugins-27dbp" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.303650 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/025219f0-bc69-4a33-acaa-b055607272bb-host-var-lib-cni-multus\") pod \"multus-s47tv\" (UID: 
\"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.303702 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/46575153-4800-4ed2-8aa3-b66b98a9c899-system-cni-dir\") pod \"multus-additional-cni-plugins-27dbp\" (UID: \"46575153-4800-4ed2-8aa3-b66b98a9c899\") " pod="openshift-multus/multus-additional-cni-plugins-27dbp" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.303749 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/025219f0-bc69-4a33-acaa-b055607272bb-host-var-lib-cni-bin\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.303771 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/d58c71b5-5dc4-45c1-9b58-9740a35d2256-rootfs\") pod \"machine-config-daemon-98mzt\" (UID: \"d58c71b5-5dc4-45c1-9b58-9740a35d2256\") " pod="openshift-machine-config-operator/machine-config-daemon-98mzt" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.303798 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/025219f0-bc69-4a33-acaa-b055607272bb-cnibin\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.303826 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/025219f0-bc69-4a33-acaa-b055607272bb-host-run-netns\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.303837 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/d58c71b5-5dc4-45c1-9b58-9740a35d2256-rootfs\") pod \"machine-config-daemon-98mzt\" (UID: \"d58c71b5-5dc4-45c1-9b58-9740a35d2256\") " pod="openshift-machine-config-operator/machine-config-daemon-98mzt" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.303857 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/46575153-4800-4ed2-8aa3-b66b98a9c899-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-27dbp\" (UID: \"46575153-4800-4ed2-8aa3-b66b98a9c899\") " pod="openshift-multus/multus-additional-cni-plugins-27dbp" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.303897 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vmtw6\" (UniqueName: \"kubernetes.io/projected/d58c71b5-5dc4-45c1-9b58-9740a35d2256-kube-api-access-vmtw6\") pod \"machine-config-daemon-98mzt\" (UID: \"d58c71b5-5dc4-45c1-9b58-9740a35d2256\") " pod="openshift-machine-config-operator/machine-config-daemon-98mzt" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.303917 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: 
\"kubernetes.io/host-path/46575153-4800-4ed2-8aa3-b66b98a9c899-cnibin\") pod \"multus-additional-cni-plugins-27dbp\" (UID: \"46575153-4800-4ed2-8aa3-b66b98a9c899\") " pod="openshift-multus/multus-additional-cni-plugins-27dbp" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.303941 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/025219f0-bc69-4a33-acaa-b055607272bb-host-var-lib-kubelet\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.303984 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/025219f0-bc69-4a33-acaa-b055607272bb-multus-daemon-config\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.304002 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/46575153-4800-4ed2-8aa3-b66b98a9c899-tuning-conf-dir\") pod \"multus-additional-cni-plugins-27dbp\" (UID: \"46575153-4800-4ed2-8aa3-b66b98a9c899\") " pod="openshift-multus/multus-additional-cni-plugins-27dbp" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.304021 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n7rjx\" (UniqueName: \"kubernetes.io/projected/025219f0-bc69-4a33-acaa-b055607272bb-kube-api-access-n7rjx\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.304039 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d58c71b5-5dc4-45c1-9b58-9740a35d2256-mcd-auth-proxy-config\") pod \"machine-config-daemon-98mzt\" (UID: \"d58c71b5-5dc4-45c1-9b58-9740a35d2256\") " pod="openshift-machine-config-operator/machine-config-daemon-98mzt" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.304055 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/025219f0-bc69-4a33-acaa-b055607272bb-multus-cni-dir\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.304079 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/025219f0-bc69-4a33-acaa-b055607272bb-host-run-k8s-cni-cncf-io\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.304096 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-brknc\" (UniqueName: \"kubernetes.io/projected/46575153-4800-4ed2-8aa3-b66b98a9c899-kube-api-access-brknc\") pod \"multus-additional-cni-plugins-27dbp\" (UID: \"46575153-4800-4ed2-8aa3-b66b98a9c899\") " pod="openshift-multus/multus-additional-cni-plugins-27dbp" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 
09:44:38.304113 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d58c71b5-5dc4-45c1-9b58-9740a35d2256-proxy-tls\") pod \"machine-config-daemon-98mzt\" (UID: \"d58c71b5-5dc4-45c1-9b58-9740a35d2256\") " pod="openshift-machine-config-operator/machine-config-daemon-98mzt" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.304128 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/025219f0-bc69-4a33-acaa-b055607272bb-multus-conf-dir\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.304145 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/025219f0-bc69-4a33-acaa-b055607272bb-hostroot\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.304820 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2bfc42c354557e335b5586b8aae6051c137143fdd2419a04802375587244df0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.305161 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d58c71b5-5dc4-45c1-9b58-9740a35d2256-mcd-auth-proxy-config\") pod \"machine-config-daemon-98mzt\" (UID: \"d58c71b5-5dc4-45c1-9b58-9740a35d2256\") " pod="openshift-machine-config-operator/machine-config-daemon-98mzt" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.310796 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d58c71b5-5dc4-45c1-9b58-9740a35d2256-proxy-tls\") pod \"machine-config-daemon-98mzt\" (UID: \"d58c71b5-5dc4-45c1-9b58-9740a35d2256\") " pod="openshift-machine-config-operator/machine-config-daemon-98mzt" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.331645 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vmtw6\" (UniqueName: \"kubernetes.io/projected/d58c71b5-5dc4-45c1-9b58-9740a35d2256-kube-api-access-vmtw6\") pod \"machine-config-daemon-98mzt\" (UID: \"d58c71b5-5dc4-45c1-9b58-9740a35d2256\") " pod="openshift-machine-config-operator/machine-config-daemon-98mzt" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.332173 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.347270 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kzpxc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb3a206d-ee72-415c-bc23-2e1b2d6f8592\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv2k5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:37Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kzpxc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.370070 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbf363f-faf3-45da-a19f-54cc0119825c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e2
7753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:44:25Z\\\",\\\"message\\\":\\\"W1125 09:44:15.399269 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:44:15.400911 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063855 cert, and key in /tmp/serving-cert-2102574939/serving-signer.crt, /tmp/serving-cert-2102574939/serving-signer.key\\\\nI1125 09:44:15.634430 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:44:15.637907 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:44:15.638219 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:44:15.639555 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2102574939/tls.crt::/tmp/serving-cert-2102574939/tls.key\\\\\\\"\\\\nF1125 09:44:25.945399 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.376466 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.376957 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.377065 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.377215 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.377292 4769 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:38Z","lastTransitionTime":"2025-11-25T09:44:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.391006 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-kzpxc" event={"ID":"cb3a206d-ee72-415c-bc23-2e1b2d6f8592","Type":"ContainerStarted","Data":"8de8f89ed402477ae253aab5730cd8c605ae1826e08bc2b8d58bc8f9e0dd5537"} Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.394131 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89fbb83e7cad0ae1f970669d2cdb9bf9535d5b8e5ccec1962faa377fe263cc17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddfc773720803f36e40054cc984d270f6ff699d660c66491adb65338a52e00e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursi
veReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.404906 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-brknc\" (UniqueName: \"kubernetes.io/projected/46575153-4800-4ed2-8aa3-b66b98a9c899-kube-api-access-brknc\") pod \"multus-additional-cni-plugins-27dbp\" (UID: \"46575153-4800-4ed2-8aa3-b66b98a9c899\") " pod="openshift-multus/multus-additional-cni-plugins-27dbp" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.404948 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/025219f0-bc69-4a33-acaa-b055607272bb-multus-cni-dir\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.404987 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/025219f0-bc69-4a33-acaa-b055607272bb-host-run-k8s-cni-cncf-io\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.405005 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/025219f0-bc69-4a33-acaa-b055607272bb-multus-conf-dir\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.405019 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/025219f0-bc69-4a33-acaa-b055607272bb-hostroot\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.405034 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/025219f0-bc69-4a33-acaa-b055607272bb-os-release\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.405049 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/025219f0-bc69-4a33-acaa-b055607272bb-host-run-multus-certs\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.405068 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/025219f0-bc69-4a33-acaa-b055607272bb-system-cni-dir\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.405082 4769 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/025219f0-bc69-4a33-acaa-b055607272bb-etc-kubernetes\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.405102 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/46575153-4800-4ed2-8aa3-b66b98a9c899-os-release\") pod \"multus-additional-cni-plugins-27dbp\" (UID: \"46575153-4800-4ed2-8aa3-b66b98a9c899\") " pod="openshift-multus/multus-additional-cni-plugins-27dbp" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.405100 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/025219f0-bc69-4a33-acaa-b055607272bb-multus-cni-dir\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.405117 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/025219f0-bc69-4a33-acaa-b055607272bb-multus-socket-dir-parent\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.405132 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/025219f0-bc69-4a33-acaa-b055607272bb-cni-binary-copy\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.405147 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/46575153-4800-4ed2-8aa3-b66b98a9c899-cni-binary-copy\") pod \"multus-additional-cni-plugins-27dbp\" (UID: \"46575153-4800-4ed2-8aa3-b66b98a9c899\") " pod="openshift-multus/multus-additional-cni-plugins-27dbp" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.405130 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/025219f0-bc69-4a33-acaa-b055607272bb-multus-conf-dir\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.405188 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/025219f0-bc69-4a33-acaa-b055607272bb-host-var-lib-cni-multus\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.405194 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/025219f0-bc69-4a33-acaa-b055607272bb-etc-kubernetes\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.405166 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/025219f0-bc69-4a33-acaa-b055607272bb-host-run-k8s-cni-cncf-io\") pod \"multus-s47tv\" (UID: 
\"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.405163 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/025219f0-bc69-4a33-acaa-b055607272bb-host-var-lib-cni-multus\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.405240 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/46575153-4800-4ed2-8aa3-b66b98a9c899-os-release\") pod \"multus-additional-cni-plugins-27dbp\" (UID: \"46575153-4800-4ed2-8aa3-b66b98a9c899\") " pod="openshift-multus/multus-additional-cni-plugins-27dbp" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.405250 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/025219f0-bc69-4a33-acaa-b055607272bb-hostroot\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.405282 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/46575153-4800-4ed2-8aa3-b66b98a9c899-system-cni-dir\") pod \"multus-additional-cni-plugins-27dbp\" (UID: \"46575153-4800-4ed2-8aa3-b66b98a9c899\") " pod="openshift-multus/multus-additional-cni-plugins-27dbp" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.405327 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/025219f0-bc69-4a33-acaa-b055607272bb-os-release\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.405339 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/025219f0-bc69-4a33-acaa-b055607272bb-host-var-lib-cni-bin\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.405368 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/025219f0-bc69-4a33-acaa-b055607272bb-host-run-multus-certs\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.405374 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/025219f0-bc69-4a33-acaa-b055607272bb-cnibin\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.405402 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/025219f0-bc69-4a33-acaa-b055607272bb-host-run-netns\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.405416 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/025219f0-bc69-4a33-acaa-b055607272bb-system-cni-dir\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.405438 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/46575153-4800-4ed2-8aa3-b66b98a9c899-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-27dbp\" (UID: \"46575153-4800-4ed2-8aa3-b66b98a9c899\") " pod="openshift-multus/multus-additional-cni-plugins-27dbp" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.405458 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/025219f0-bc69-4a33-acaa-b055607272bb-host-var-lib-cni-bin\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.405466 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/46575153-4800-4ed2-8aa3-b66b98a9c899-cnibin\") pod \"multus-additional-cni-plugins-27dbp\" (UID: \"46575153-4800-4ed2-8aa3-b66b98a9c899\") " pod="openshift-multus/multus-additional-cni-plugins-27dbp" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.405497 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/46575153-4800-4ed2-8aa3-b66b98a9c899-system-cni-dir\") pod \"multus-additional-cni-plugins-27dbp\" (UID: \"46575153-4800-4ed2-8aa3-b66b98a9c899\") " pod="openshift-multus/multus-additional-cni-plugins-27dbp" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.405502 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/025219f0-bc69-4a33-acaa-b055607272bb-host-var-lib-kubelet\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.405286 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/025219f0-bc69-4a33-acaa-b055607272bb-multus-socket-dir-parent\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.405573 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/025219f0-bc69-4a33-acaa-b055607272bb-multus-daemon-config\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.405599 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/46575153-4800-4ed2-8aa3-b66b98a9c899-tuning-conf-dir\") pod \"multus-additional-cni-plugins-27dbp\" (UID: \"46575153-4800-4ed2-8aa3-b66b98a9c899\") " pod="openshift-multus/multus-additional-cni-plugins-27dbp" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.405611 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: 
\"kubernetes.io/host-path/025219f0-bc69-4a33-acaa-b055607272bb-cnibin\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.405622 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n7rjx\" (UniqueName: \"kubernetes.io/projected/025219f0-bc69-4a33-acaa-b055607272bb-kube-api-access-n7rjx\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.405651 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/025219f0-bc69-4a33-acaa-b055607272bb-host-var-lib-kubelet\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.405871 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/025219f0-bc69-4a33-acaa-b055607272bb-cni-binary-copy\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.405576 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/025219f0-bc69-4a33-acaa-b055607272bb-host-run-netns\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.406327 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/46575153-4800-4ed2-8aa3-b66b98a9c899-cni-binary-copy\") pod \"multus-additional-cni-plugins-27dbp\" (UID: \"46575153-4800-4ed2-8aa3-b66b98a9c899\") " pod="openshift-multus/multus-additional-cni-plugins-27dbp" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.406380 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/46575153-4800-4ed2-8aa3-b66b98a9c899-cnibin\") pod \"multus-additional-cni-plugins-27dbp\" (UID: \"46575153-4800-4ed2-8aa3-b66b98a9c899\") " pod="openshift-multus/multus-additional-cni-plugins-27dbp" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.406391 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/46575153-4800-4ed2-8aa3-b66b98a9c899-tuning-conf-dir\") pod \"multus-additional-cni-plugins-27dbp\" (UID: \"46575153-4800-4ed2-8aa3-b66b98a9c899\") " pod="openshift-multus/multus-additional-cni-plugins-27dbp" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.406415 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/46575153-4800-4ed2-8aa3-b66b98a9c899-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-27dbp\" (UID: \"46575153-4800-4ed2-8aa3-b66b98a9c899\") " pod="openshift-multus/multus-additional-cni-plugins-27dbp" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.406524 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/025219f0-bc69-4a33-acaa-b055607272bb-multus-daemon-config\") pod \"multus-s47tv\" (UID: 
\"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.424156 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n7rjx\" (UniqueName: \"kubernetes.io/projected/025219f0-bc69-4a33-acaa-b055607272bb-kube-api-access-n7rjx\") pod \"multus-s47tv\" (UID: \"025219f0-bc69-4a33-acaa-b055607272bb\") " pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.424164 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-brknc\" (UniqueName: \"kubernetes.io/projected/46575153-4800-4ed2-8aa3-b66b98a9c899-kube-api-access-brknc\") pod \"multus-additional-cni-plugins-27dbp\" (UID: \"46575153-4800-4ed2-8aa3-b66b98a9c899\") " pod="openshift-multus/multus-additional-cni-plugins-27dbp" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.425137 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbf363f-faf3-45da-a19f-54cc0119825c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:44:25Z\\\",\\\"message\\\":\\\"W1125 09:44:15.399269 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
09:44:15.400911 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063855 cert, and key in /tmp/serving-cert-2102574939/serving-signer.crt, /tmp/serving-cert-2102574939/serving-signer.key\\\\nI1125 09:44:15.634430 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:44:15.637907 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:44:15.638219 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:44:15.639555 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2102574939/tls.crt::/tmp/serving-cert-2102574939/tls.key\\\\\\\"\\\\nF1125 09:44:25.945399 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.448707 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89fbb83e7cad0ae1f970669d2cdb9bf9535d5b8e5ccec1962faa377fe263cc17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddfc773720803f36e40054cc984d270f6ff699d660c66491adb65338a52e00e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2025-11-25T09:44:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.448739 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.453424 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-s47tv" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.464463 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2bfc42c354557e335b5586b8aae6051c137143fdd2419a04802375587244df0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.464505 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-27dbp" Nov 25 09:44:38 crc kubenswrapper[4769]: W1125 09:44:38.466856 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd58c71b5_5dc4_45c1_9b58_9740a35d2256.slice/crio-973887bcda4cad0a4fe6ab371407c7623c9d25b47eaa31657094f37004795bed WatchSource:0}: Error finding container 973887bcda4cad0a4fe6ab371407c7623c9d25b47eaa31657094f37004795bed: Status 404 returned error can't find the container with id 973887bcda4cad0a4fe6ab371407c7623c9d25b47eaa31657094f37004795bed Nov 25 09:44:38 crc kubenswrapper[4769]: W1125 09:44:38.469473 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod025219f0_bc69_4a33_acaa_b055607272bb.slice/crio-d2d64a44cd80cc72ef18642fb81d5e2a873ed3ddd8ad827dc8c60542b4622a1a WatchSource:0}: Error finding container d2d64a44cd80cc72ef18642fb81d5e2a873ed3ddd8ad827dc8c60542b4622a1a: Status 404 returned error can't find the container with id d2d64a44cd80cc72ef18642fb81d5e2a873ed3ddd8ad827dc8c60542b4622a1a Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.477599 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kzpxc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb3a206d-ee72-415c-bc23-2e1b2d6f8592\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv2k5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:37Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kzpxc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:38 crc kubenswrapper[4769]: W1125 09:44:38.478843 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod46575153_4800_4ed2_8aa3_b66b98a9c899.slice/crio-dc82fcc8b8d151d74ac6f50da88e5df94bf65b01a9a65baf57ea40034f8e8efc WatchSource:0}: Error finding container dc82fcc8b8d151d74ac6f50da88e5df94bf65b01a9a65baf57ea40034f8e8efc: Status 404 returned error can't find the container with id dc82fcc8b8d151d74ac6f50da88e5df94bf65b01a9a65baf57ea40034f8e8efc Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.479472 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.479509 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.479518 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.479533 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.479543 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:38Z","lastTransitionTime":"2025-11-25T09:44:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.497156 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27dbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46575153-4800-4ed2-8aa3-b66b98a9c899\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\
\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastSt
ate\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27dbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.513692 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c58a9381-bd89-4fd9-8217-f04646879968\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cd58c1df5cc30ce98fc70b6117deafa5ebeb2d1a7d7b481b62c38ab39c9290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"started
At\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8b504d3d81cc7b0e4741363b8a9e4c0ea3a0c1af959c8b3def222b44a646b22b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fad541e8dbab3802d7acafd3a70a54f992d4578319dc441c41ffdffdd635d63\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.527685 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d58c71b5-5dc4-45c1-9b58-9740a35d2256\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-98mzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.552181 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-s47tv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"025219f0-bc69-4a33-acaa-b055607272bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7rjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-s47tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.566483 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-kfvzs"] Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.567775 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.571090 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.571144 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.571158 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.571093 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.571451 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.571508 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.571821 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.582181 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.582222 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.582236 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.582256 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.582270 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:38Z","lastTransitionTime":"2025-11-25T09:44:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.585407 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e477c71aa0332cf903853803df3a7cfda7c251a749b3f3d9226e0b6edbd34a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.601498 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.618168 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.633251 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.648848 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c58a9381-bd89-4fd9-8217-f04646879968\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cd58c1df5cc30ce98fc70b6117deafa5ebeb2d1a7d7b481b62c38ab39c9290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"r
esource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8b504d3d81cc7b0e4741363b8a9e4c0ea3a0c1af959c8b3def222b44a646b22b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fad541e8dbab3802d7acafd3a70a54f992d4578319dc441c41ffdffdd635d63\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.663713 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d58c71b5-5dc4-45c1-9b58-9740a35d2256\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-98mzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.677898 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-s47tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"025219f0-bc69-4a33-acaa-b055607272bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7rjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-s47tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.685443 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.685478 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.685490 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:38 crc kubenswrapper[4769]: 
I1125 09:44:38.685509 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.685522 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:38Z","lastTransitionTime":"2025-11-25T09:44:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.695193 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e477c71aa0332cf903853803df3a7cfda7c251a749b3f3d9226e0b6edbd34a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.708553 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.708861 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/069c06c6-fe60-41d0-b96d-86606f55b258-ovn-node-metrics-cert\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.708917 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9wsqq\" (UniqueName: \"kubernetes.io/projected/069c06c6-fe60-41d0-b96d-86606f55b258-kube-api-access-9wsqq\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.709073 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-etc-openvswitch\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 
09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.709107 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-run-systemd\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.709134 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-host-run-ovn-kubernetes\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.709153 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-host-run-netns\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.709172 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-var-lib-openvswitch\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.709198 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-host-kubelet\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.709217 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-systemd-units\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.709239 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.709263 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/069c06c6-fe60-41d0-b96d-86606f55b258-ovnkube-config\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.709304 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-log-socket\") 
pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.709323 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/069c06c6-fe60-41d0-b96d-86606f55b258-ovnkube-script-lib\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.709342 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-host-slash\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.709357 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-host-cni-netd\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.709402 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-run-ovn\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.709421 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/069c06c6-fe60-41d0-b96d-86606f55b258-env-overrides\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.709451 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-run-openvswitch\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.709467 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-node-log\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.709482 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-host-cni-bin\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.722202 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.743330 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.763168 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27dbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46575153-4800-4ed2-8aa3-b66b98a9c899\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27dbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.785704 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"069c06c6-fe60-41d0-b96d-86606f55b258\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\
\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-kfvzs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.789941 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.790011 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.790024 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.790044 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.790058 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:38Z","lastTransitionTime":"2025-11-25T09:44:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.804987 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbf363f-faf3-45da-a19f-54cc0119825c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:44:25Z\\\",\\\"message\\\":\\\"W1125 09:44:15.399269 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:44:15.400911 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063855 cert, and key in /tmp/serving-cert-2102574939/serving-signer.crt, /tmp/serving-cert-2102574939/serving-signer.key\\\\nI1125 09:44:15.634430 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:44:15.637907 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:44:15.638219 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:44:15.639555 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2102574939/tls.crt::/tmp/serving-cert-2102574939/tls.key\\\\\\\"\\\\nF1125 09:44:25.945399 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.811040 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.811096 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/069c06c6-fe60-41d0-b96d-86606f55b258-ovn-node-metrics-cert\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 
crc kubenswrapper[4769]: I1125 09:44:38.811132 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9wsqq\" (UniqueName: \"kubernetes.io/projected/069c06c6-fe60-41d0-b96d-86606f55b258-kube-api-access-9wsqq\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.811165 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-etc-openvswitch\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.811188 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-run-systemd\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.811222 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-host-run-ovn-kubernetes\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.811248 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-var-lib-openvswitch\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.811273 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-host-kubelet\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.811296 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-systemd-units\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.811321 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-host-run-netns\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.811345 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc 
kubenswrapper[4769]: I1125 09:44:38.811372 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/069c06c6-fe60-41d0-b96d-86606f55b258-ovnkube-config\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.811399 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-log-socket\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.811425 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/069c06c6-fe60-41d0-b96d-86606f55b258-ovnkube-script-lib\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.811462 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-host-slash\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.811465 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-etc-openvswitch\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.811497 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-host-kubelet\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.811547 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-host-cni-netd\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.811568 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-log-socket\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: E1125 09:44:38.811581 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.811612 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-var-lib-openvswitch\") pod \"ovnkube-node-kfvzs\" (UID: 
\"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: E1125 09:44:38.811641 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.811497 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-systemd-units\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.811618 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-host-slash\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.811532 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-host-run-ovn-kubernetes\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.811619 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: E1125 09:44:38.811665 4769 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.811579 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-host-run-netns\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: E1125 09:44:38.811932 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 09:44:46.811894291 +0000 UTC m=+35.396866764 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.811768 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-run-systemd\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.811489 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-host-cni-netd\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.812081 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-run-ovn\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.812150 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-run-openvswitch\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.812184 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-node-log\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.812201 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-host-cni-bin\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.812216 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/069c06c6-fe60-41d0-b96d-86606f55b258-env-overrides\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.812426 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-run-openvswitch\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.812468 4769 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-run-ovn\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.812462 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-node-log\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.812485 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-host-cni-bin\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.813544 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/069c06c6-fe60-41d0-b96d-86606f55b258-ovnkube-script-lib\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.813571 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/069c06c6-fe60-41d0-b96d-86606f55b258-env-overrides\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.813594 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/069c06c6-fe60-41d0-b96d-86606f55b258-ovnkube-config\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.818305 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/069c06c6-fe60-41d0-b96d-86606f55b258-ovn-node-metrics-cert\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.819314 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89fbb83e7cad0ae1f970669d2cdb9bf9535d5b8e5ccec1962faa377fe263cc17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddfc773720803f36e40054cc984d270f6ff699d660c66491adb65338a52e00e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.829587 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9wsqq\" (UniqueName: \"kubernetes.io/projected/069c06c6-fe60-41d0-b96d-86606f55b258-kube-api-access-9wsqq\") pod \"ovnkube-node-kfvzs\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.833716 4769 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2bfc42c354557e335b5586b8aae6051c137143fdd2419a04802375587244df0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.846268 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kzpxc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb3a206d-ee72-415c-bc23-2e1b2d6f8592\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv2k5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:37Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kzpxc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:38Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.883545 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.892777 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.892816 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.892827 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.892875 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.892891 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:38Z","lastTransitionTime":"2025-11-25T09:44:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:38 crc kubenswrapper[4769]: W1125 09:44:38.899190 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod069c06c6_fe60_41d0_b96d_86606f55b258.slice/crio-d5b0eeb88ebfd4de242398d7b2b6308ff4c161977fd5e72c60ccd1064825a64b WatchSource:0}: Error finding container d5b0eeb88ebfd4de242398d7b2b6308ff4c161977fd5e72c60ccd1064825a64b: Status 404 returned error can't find the container with id d5b0eeb88ebfd4de242398d7b2b6308ff4c161977fd5e72c60ccd1064825a64b Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.913244 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.913451 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.913501 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.913530 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:44:38 crc kubenswrapper[4769]: E1125 09:44:38.913649 4769 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:44:38 crc kubenswrapper[4769]: E1125 09:44:38.913709 4769 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:44:38 crc kubenswrapper[4769]: E1125 09:44:38.913732 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:44:38 crc kubenswrapper[4769]: E1125 09:44:38.913765 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:44:38 crc kubenswrapper[4769]: E1125 09:44:38.913771 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. 
No retries permitted until 2025-11-25 09:44:46.913746036 +0000 UTC m=+35.498718349 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:44:38 crc kubenswrapper[4769]: E1125 09:44:38.913784 4769 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:44:38 crc kubenswrapper[4769]: E1125 09:44:38.913824 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:44:46.913798607 +0000 UTC m=+35.498770920 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:44:38 crc kubenswrapper[4769]: E1125 09:44:38.913860 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 09:44:46.913849929 +0000 UTC m=+35.498822462 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:44:38 crc kubenswrapper[4769]: E1125 09:44:38.914049 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:44:46.914015003 +0000 UTC m=+35.498987316 (durationBeforeRetry 8s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.995161 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.995223 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.995238 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.995266 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:38 crc kubenswrapper[4769]: I1125 09:44:38.995327 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:38Z","lastTransitionTime":"2025-11-25T09:44:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.098474 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.098505 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.098513 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.098528 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.098539 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:39Z","lastTransitionTime":"2025-11-25T09:44:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.201888 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.201946 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.201984 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.202021 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.202037 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:39Z","lastTransitionTime":"2025-11-25T09:44:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.236294 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.236361 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:44:39 crc kubenswrapper[4769]: E1125 09:44:39.236465 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.236594 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:44:39 crc kubenswrapper[4769]: E1125 09:44:39.236783 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:44:39 crc kubenswrapper[4769]: E1125 09:44:39.236940 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.304505 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.304543 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.304558 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.304577 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.304588 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:39Z","lastTransitionTime":"2025-11-25T09:44:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.396536 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" event={"ID":"d58c71b5-5dc4-45c1-9b58-9740a35d2256","Type":"ContainerStarted","Data":"bdf114b30bdda719b3e55767f9df9f9e8e3e3e9238fa8403a388d7892922563e"} Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.396589 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" event={"ID":"d58c71b5-5dc4-45c1-9b58-9740a35d2256","Type":"ContainerStarted","Data":"6a47750ed21c017ef2cf17c7e8ce0676b8bd6a9fe2669c3d5adae247e557ea71"} Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.396604 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" event={"ID":"d58c71b5-5dc4-45c1-9b58-9740a35d2256","Type":"ContainerStarted","Data":"973887bcda4cad0a4fe6ab371407c7623c9d25b47eaa31657094f37004795bed"} Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.399208 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-kzpxc" event={"ID":"cb3a206d-ee72-415c-bc23-2e1b2d6f8592","Type":"ContainerStarted","Data":"f88b14a3ffb863171ab8bbb4eb89877dfd711fadb03f45a6a337540d34c0a7d1"} Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.401530 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-s47tv" event={"ID":"025219f0-bc69-4a33-acaa-b055607272bb","Type":"ContainerStarted","Data":"b8283067913fddd4b3d58c8dc0cc7000d482d929bc6a1d4fb501bfa8c0fa9525"} Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.401576 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-s47tv" event={"ID":"025219f0-bc69-4a33-acaa-b055607272bb","Type":"ContainerStarted","Data":"d2d64a44cd80cc72ef18642fb81d5e2a873ed3ddd8ad827dc8c60542b4622a1a"} Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.404629 4769 generic.go:334] "Generic (PLEG): container finished" podID="069c06c6-fe60-41d0-b96d-86606f55b258" containerID="a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065" exitCode=0 Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 
09:44:39.404728 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" event={"ID":"069c06c6-fe60-41d0-b96d-86606f55b258","Type":"ContainerDied","Data":"a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065"} Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.404788 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" event={"ID":"069c06c6-fe60-41d0-b96d-86606f55b258","Type":"ContainerStarted","Data":"d5b0eeb88ebfd4de242398d7b2b6308ff4c161977fd5e72c60ccd1064825a64b"} Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.406539 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.406593 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.406605 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.406628 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.406635 4769 generic.go:334] "Generic (PLEG): container finished" podID="46575153-4800-4ed2-8aa3-b66b98a9c899" containerID="6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f" exitCode=0 Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.406642 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:39Z","lastTransitionTime":"2025-11-25T09:44:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.406671 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-27dbp" event={"ID":"46575153-4800-4ed2-8aa3-b66b98a9c899","Type":"ContainerDied","Data":"6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f"} Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.406699 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-27dbp" event={"ID":"46575153-4800-4ed2-8aa3-b66b98a9c899","Type":"ContainerStarted","Data":"dc82fcc8b8d151d74ac6f50da88e5df94bf65b01a9a65baf57ea40034f8e8efc"} Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.414406 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:39Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.430956 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:39Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.451159 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2bfc42c354557e335b5586b8aae6051c137143fdd2419a04802375587244df0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:39Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.462756 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kzpxc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb3a206d-ee72-415c-bc23-2e1b2d6f8592\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv2k5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:37Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kzpxc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:39Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.478381 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27dbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46575153-4800-4ed2-8aa3-b66b98a9c899\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plu
gin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27dbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-25T09:44:39Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.497378 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"069c06c6-fe60-41d0-b96d-86606f55b258\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d209
9482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\
":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.1
68.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-kfvzs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:39Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.509949 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.510015 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.510027 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.510047 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.510058 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:39Z","lastTransitionTime":"2025-11-25T09:44:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.525032 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbf363f-faf3-45da-a19f-54cc0119825c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:44:25Z\\\",\\\"message\\\":\\\"W1125 09:44:15.399269 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:44:15.400911 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063855 cert, and key in /tmp/serving-cert-2102574939/serving-signer.crt, /tmp/serving-cert-2102574939/serving-signer.key\\\\nI1125 09:44:15.634430 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:44:15.637907 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:44:15.638219 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:44:15.639555 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2102574939/tls.crt::/tmp/serving-cert-2102574939/tls.key\\\\\\\"\\\\nF1125 09:44:25.945399 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:39Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.543079 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89fbb83e7cad0ae1f970669d2cdb9bf9535d5b8e5ccec1962faa377fe263cc17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddfc773720803f36e40054cc984d270f6ff699d660c66491adb65338a52e00e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:39Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.556534 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c58a9381-bd89-4fd9-8217-f04646879968\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cd58c1df5cc30ce98fc70b6117deafa5ebeb2d1a7d7b481b62c38ab39c9290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8b504d3d81cc7b0e4741363b8a9e4c0ea3a0c1af959c8b3def222b44a646b22b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fad541e8dbab3802d7acafd3a70a54f992d4578319dc441c41ffdffdd635d63\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:39Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.567273 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d58c71b5-5dc4-45c1-9b58-9740a35d2256\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdf114b30bdda719b3e55767f9df9f9e8e3e3e9238fa8403a388d7892922563e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a47750ed21c017ef2
cf17c7e8ce0676b8bd6a9fe2669c3d5adae247e557ea71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-98mzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:39Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.582934 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-s47tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"025219f0-bc69-4a33-acaa-b055607272bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7rjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-s47tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:39Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.597367 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e477c71aa0332cf903853803df3a7cfda7c251a749b3f3d9226e0b6edbd34a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:39Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.608693 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:39Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.612624 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.612654 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.612665 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.612682 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.612695 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:39Z","lastTransitionTime":"2025-11-25T09:44:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.620621 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:39Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.634222 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:39Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.648538 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89fbb83e7cad0ae1f970669d2cdb9bf9535d5b8e5ccec1962faa377fe263cc17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddfc773720803f36e40054cc984d270f6ff699d660c66491adb65338a52e00e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:39Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.660489 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2bfc42c354557e335b5586b8aae6051c137143fdd2419a04802375587244df0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:39Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.671637 4769 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-dns/node-resolver-kzpxc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb3a206d-ee72-415c-bc23-2e1b2d6f8592\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f88b14a3ffb863171ab8bbb4eb89877dfd711fadb03f45a6a337540d34c0a7d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv2k5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:37Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kzpxc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:39Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.686112 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27dbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46575153-4800-4ed2-8aa3-b66b98a9c899\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\
\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"po
dIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27dbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:39Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.709254 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"069c06c6-fe60-41d0-b96d-86606f55b258\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-kfvzs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:39Z 
is after 2025-08-24T17:21:41Z" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.716004 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.716042 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.716052 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.716067 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.716079 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:39Z","lastTransitionTime":"2025-11-25T09:44:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.732296 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbf363f-faf3-45da-a19f-54cc0119825c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:44:25Z\\\",\\\"message\\\":\\\"W1125 09:44:15.399269 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
09:44:15.400911 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063855 cert, and key in /tmp/serving-cert-2102574939/serving-signer.crt, /tmp/serving-cert-2102574939/serving-signer.key\\\\nI1125 09:44:15.634430 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:44:15.637907 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:44:15.638219 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:44:15.639555 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2102574939/tls.crt::/tmp/serving-cert-2102574939/tls.key\\\\\\\"\\\\nF1125 09:44:25.945399 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:39Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.746322 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-s47tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"025219f0-bc69-4a33-acaa-b055607272bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8283067913fddd4b3d58c8dc0cc7000d482d929bc6a1d4fb501bfa8c0fa9525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\
"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7rjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-s47tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:39Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.766049 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c58a9381-bd89-4fd9-8217-f04646879968\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cd58c1df5cc30ce98fc70b6117deafa5ebeb2d1a7d7b481b62c38ab39c9290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\
":\\\"cri-o://8b504d3d81cc7b0e4741363b8a9e4c0ea3a0c1af959c8b3def222b44a646b22b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fad541e8dbab3802d7acafd3a70a54f992d4578319dc441c41ffdffdd635d63\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:39Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.779288 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d58c71b5-5dc4-45c1-9b58-9740a35d2256\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdf114b30bdda719b3e55767f9df9f9e8e3e3e9238fa8403a388d7892922563e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a47750ed21c017ef2cf17c7e8ce0676b8bd6a9fe2669c3d5adae247e557ea71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-98mzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:39Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.794946 4769 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e477c71aa0332cf903853803df3a7cfda7c251a749b3f3d9226e0b6edbd34a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:39Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.811853 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:39Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.818954 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.819031 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.819049 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.819072 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.819090 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:39Z","lastTransitionTime":"2025-11-25T09:44:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.922241 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.922294 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.922304 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.922324 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:39 crc kubenswrapper[4769]: I1125 09:44:39.922351 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:39Z","lastTransitionTime":"2025-11-25T09:44:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.025354 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.025881 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.025895 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.025915 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.025927 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:40Z","lastTransitionTime":"2025-11-25T09:44:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.131839 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.131898 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.131912 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.131931 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.131944 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:40Z","lastTransitionTime":"2025-11-25T09:44:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.235475 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.235530 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.235545 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.235565 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.235579 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:40Z","lastTransitionTime":"2025-11-25T09:44:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.339002 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.339046 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.339062 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.339085 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.339097 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:40Z","lastTransitionTime":"2025-11-25T09:44:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.416584 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" event={"ID":"069c06c6-fe60-41d0-b96d-86606f55b258","Type":"ContainerStarted","Data":"0893245e50828fc44a5a9f575584f675dfd48a67e31fb78734b575e30af769ae"} Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.416652 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" event={"ID":"069c06c6-fe60-41d0-b96d-86606f55b258","Type":"ContainerStarted","Data":"79aa5dc74b62d0d36d744a8603484ce1a5199bba790a406ffa70f76b70592e51"} Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.416665 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" event={"ID":"069c06c6-fe60-41d0-b96d-86606f55b258","Type":"ContainerStarted","Data":"a41ce53b7a0ecd75b9205774e514ba01c6852f939cff8e96fa49105f4e600702"} Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.416675 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" event={"ID":"069c06c6-fe60-41d0-b96d-86606f55b258","Type":"ContainerStarted","Data":"82259eae43bbd7b738e4f7fef449b797efc4d557417eb01b61a9ff1e6d59d07b"} Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.416689 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" event={"ID":"069c06c6-fe60-41d0-b96d-86606f55b258","Type":"ContainerStarted","Data":"7459078ef54cd1bb3171908596cdc66735a96463eafbc5a0ca21e644cd934e1a"} Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.416700 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" event={"ID":"069c06c6-fe60-41d0-b96d-86606f55b258","Type":"ContainerStarted","Data":"ab25f2ae721ae8616e5ef712256055c8f8a353d898a7f2711938634d2d9fbbe9"} Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.418684 4769 generic.go:334] "Generic (PLEG): container finished" podID="46575153-4800-4ed2-8aa3-b66b98a9c899" containerID="42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6" exitCode=0 Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.418801 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-27dbp" event={"ID":"46575153-4800-4ed2-8aa3-b66b98a9c899","Type":"ContainerDied","Data":"42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6"} Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.434444 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e477c71aa0332cf903853803df3a7cfda7c251a749b3f3d9226e0b6edbd34a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.442372 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.442418 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.442433 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.442453 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.442466 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:40Z","lastTransitionTime":"2025-11-25T09:44:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.451812 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.468558 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.486426 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.502328 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89fbb83e7cad0ae1f970669d2cdb9bf9535d5b8e5ccec1962faa377fe263cc17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddfc773720803f36e40054cc984d270f6ff699d660c66491adb65338a52e00e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.515731 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2bfc42c354557e335b5586b8aae6051c137143fdd2419a04802375587244df0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.540773 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kzpxc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb3a206d-ee72-415c-bc23-2e1b2d6f8592\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f88b14a3ffb863171ab8bbb4eb89877dfd711fadb03f45a6a337540d34c0a7d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv2k5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:37Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kzpxc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.551868 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.551922 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.551935 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.551955 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.551991 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:40Z","lastTransitionTime":"2025-11-25T09:44:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.572023 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27dbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46575153-4800-4ed2-8aa3-b66b98a9c899\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled
\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"la
stState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27dbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.601086 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"069c06c6-fe60-41d0-b96d-86606f55b258\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-kfvzs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:40Z 
is after 2025-08-24T17:21:41Z" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.618708 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbf363f-faf3-45da-a19f-54cc0119825c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\
\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:44:25Z\\\",\\\"message\\\":\\\"W1125 09:44:15.399269 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:44:15.400911 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063855 cert, and key in /tmp/serving-cert-2102574939/serving-signer.crt, /tmp/serving-cert-2102574939/serving-signer.key\\\\nI1125 09:44:15.634430 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:44:15.637907 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:44:15.638219 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:44:15.639555 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2102574939/tls.crt::/tmp/serving-cert-2102574939/tls.key\\\\\\\"\\\\nF1125 09:44:25.945399 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.634110 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-s47tv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"025219f0-bc69-4a33-acaa-b055607272bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8283067913fddd4b3d58c8dc0cc7000d482d929bc6a1d4fb501bfa8c0fa9525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7rjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-s47tv\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.648149 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c58a9381-bd89-4fd9-8217-f04646879968\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cd58c1df5cc30ce98fc70b6117deafa5ebeb2d1a7d7b481b62c38ab39c9290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8b504d3d81cc7b0e4741363b8a9e4c0ea3a0c1af959c8b3def222b44a646b22b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fad541e8dbab3802d7acafd3a70a54f992d4578319dc441c41ffdffdd635d63\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.655339 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.655378 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.655394 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.655417 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.655437 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:40Z","lastTransitionTime":"2025-11-25T09:44:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.659926 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d58c71b5-5dc4-45c1-9b58-9740a35d2256\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdf114b30bdda719b3e55767f9df9f9e8e3e3e9238fa8403a388d7892922563e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a47750ed21c017ef2cf17c7e8ce0676b8bd6a9fe2669c3d5adae247e557ea71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-98mzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:40Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.759510 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.759567 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.759580 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.759599 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.759610 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:40Z","lastTransitionTime":"2025-11-25T09:44:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.862330 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.862383 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.862392 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.862420 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.862432 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:40Z","lastTransitionTime":"2025-11-25T09:44:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.965444 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.965494 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.965505 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.965523 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:40 crc kubenswrapper[4769]: I1125 09:44:40.965533 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:40Z","lastTransitionTime":"2025-11-25T09:44:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.068756 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.068800 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.068811 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.068835 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.068853 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:41Z","lastTransitionTime":"2025-11-25T09:44:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.172980 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.173032 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.173045 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.173067 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.173081 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:41Z","lastTransitionTime":"2025-11-25T09:44:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.210125 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-v24zk"] Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.210588 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-v24zk" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.212730 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.213127 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.213431 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.214594 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.228974 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89fbb83e7cad0ae1f970669d2cdb9bf9535d5b8e5ccec1962faa377fe263cc17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddfc773720803f36e40054cc984d270f6ff699d660c66491adb65338a52e00e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\
"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.236118 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:44:41 crc kubenswrapper[4769]: E1125 09:44:41.236570 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.236385 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:44:41 crc kubenswrapper[4769]: E1125 09:44:41.236925 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.236297 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:44:41 crc kubenswrapper[4769]: E1125 09:44:41.237235 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.244299 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2bfc42c354557e335b5586b8aae6051c137143fdd2419a04802375587244df0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.257164 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kzpxc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb3a206d-ee72-415c-bc23-2e1b2d6f8592\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f88b14a3ffb863171ab8bbb4eb89877dfd711fadb03f45a6a337540d34c0a7d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv2k5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:37Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kzpxc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.271822 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27dbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46575153-4800-4ed2-8aa3-b66b98a9c899\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":
{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27dbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.276789 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.276981 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.277103 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.277206 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.277306 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:41Z","lastTransitionTime":"2025-11-25T09:44:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.294144 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"069c06c6-fe60-41d0-b96d-86606f55b258\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e
00d47a4670a065\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-kfvzs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.309465 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbf363f-faf3-45da-a19f-54cc0119825c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:44:25Z\\\",\\\"message\\\":\\\"W1125 09:44:15.399269 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
09:44:15.400911 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063855 cert, and key in /tmp/serving-cert-2102574939/serving-signer.crt, /tmp/serving-cert-2102574939/serving-signer.key\\\\nI1125 09:44:15.634430 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:44:15.637907 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:44:15.638219 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:44:15.639555 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2102574939/tls.crt::/tmp/serving-cert-2102574939/tls.key\\\\\\\"\\\\nF1125 09:44:25.945399 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.323475 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-s47tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"025219f0-bc69-4a33-acaa-b055607272bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8283067913fddd4b3d58c8dc0cc7000d482d929bc6a1d4fb501bfa8c0fa9525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\
"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7rjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-s47tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.339366 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c58a9381-bd89-4fd9-8217-f04646879968\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cd58c1df5cc30ce98fc70b6117deafa5ebeb2d1a7d7b481b62c38ab39c9290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\
":\\\"cri-o://8b504d3d81cc7b0e4741363b8a9e4c0ea3a0c1af959c8b3def222b44a646b22b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fad541e8dbab3802d7acafd3a70a54f992d4578319dc441c41ffdffdd635d63\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.346932 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/a18d0f06-2fff-4e1a-9b11-01eaea85baa1-serviceca\") pod \"node-ca-v24zk\" (UID: \"a18d0f06-2fff-4e1a-9b11-01eaea85baa1\") " pod="openshift-image-registry/node-ca-v24zk" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.347165 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a18d0f06-2fff-4e1a-9b11-01eaea85baa1-host\") pod \"node-ca-v24zk\" (UID: \"a18d0f06-2fff-4e1a-9b11-01eaea85baa1\") " pod="openshift-image-registry/node-ca-v24zk" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.347305 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8gdv8\" (UniqueName: \"kubernetes.io/projected/a18d0f06-2fff-4e1a-9b11-01eaea85baa1-kube-api-access-8gdv8\") pod \"node-ca-v24zk\" (UID: \"a18d0f06-2fff-4e1a-9b11-01eaea85baa1\") " pod="openshift-image-registry/node-ca-v24zk" Nov 25 
09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.353695 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d58c71b5-5dc4-45c1-9b58-9740a35d2256\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdf114b30bdda719b3e55767f9df9f9e8e3e3e9238fa8403a388d7892922563e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a47750ed21c017ef2cf17c7e8ce0676b8bd6a9fe2669c3d5adae247e557ea71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-98mzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2025-11-25T09:44:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.368470 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e477c71aa0332cf903853803df3a7cfda7c251a749b3f3d9226e0b6edbd34a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.380439 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.380745 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.380817 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.380890 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.381042 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:41Z","lastTransitionTime":"2025-11-25T09:44:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration 
file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.381403 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.393547 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with 
unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.407329 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.420904 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-v24zk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a18d0f06-2fff-4e1a-9b11-01eaea85baa1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8gdv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-v24zk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is 
not yet valid: current time 2025-11-25T09:44:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.426256 4769 generic.go:334] "Generic (PLEG): container finished" podID="46575153-4800-4ed2-8aa3-b66b98a9c899" containerID="eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694" exitCode=0 Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.426337 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-27dbp" event={"ID":"46575153-4800-4ed2-8aa3-b66b98a9c899","Type":"ContainerDied","Data":"eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694"} Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.443832 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e477c71aa0332cf903853803df3a7cfda7c251a749b3f3d9226e0b6edbd34a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.448607 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a18d0f06-2fff-4e1a-9b11-01eaea85baa1-host\") pod \"node-ca-v24zk\" (UID: \"a18d0f06-2fff-4e1a-9b11-01eaea85baa1\") " pod="openshift-image-registry/node-ca-v24zk" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.448658 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: 
\"kubernetes.io/configmap/a18d0f06-2fff-4e1a-9b11-01eaea85baa1-serviceca\") pod \"node-ca-v24zk\" (UID: \"a18d0f06-2fff-4e1a-9b11-01eaea85baa1\") " pod="openshift-image-registry/node-ca-v24zk" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.448695 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8gdv8\" (UniqueName: \"kubernetes.io/projected/a18d0f06-2fff-4e1a-9b11-01eaea85baa1-kube-api-access-8gdv8\") pod \"node-ca-v24zk\" (UID: \"a18d0f06-2fff-4e1a-9b11-01eaea85baa1\") " pod="openshift-image-registry/node-ca-v24zk" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.448952 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a18d0f06-2fff-4e1a-9b11-01eaea85baa1-host\") pod \"node-ca-v24zk\" (UID: \"a18d0f06-2fff-4e1a-9b11-01eaea85baa1\") " pod="openshift-image-registry/node-ca-v24zk" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.450030 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/a18d0f06-2fff-4e1a-9b11-01eaea85baa1-serviceca\") pod \"node-ca-v24zk\" (UID: \"a18d0f06-2fff-4e1a-9b11-01eaea85baa1\") " pod="openshift-image-registry/node-ca-v24zk" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.458504 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.470715 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8gdv8\" (UniqueName: \"kubernetes.io/projected/a18d0f06-2fff-4e1a-9b11-01eaea85baa1-kube-api-access-8gdv8\") pod \"node-ca-v24zk\" (UID: \"a18d0f06-2fff-4e1a-9b11-01eaea85baa1\") " pod="openshift-image-registry/node-ca-v24zk" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.474632 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.483839 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.483929 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.483943 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.483990 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.484017 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:41Z","lastTransitionTime":"2025-11-25T09:44:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.492258 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.507813 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-v24zk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a18d0f06-2fff-4e1a-9b11-01eaea85baa1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8gdv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-v24zk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.519758 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89fbb83e7cad0ae1f970669d2cdb9bf9535d5b8e5ccec1962faa377fe263cc17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddfc773720803f36e40054cc984d270f6ff699d660c66491adb65338a52e00e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.525407 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-v24zk" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.532919 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2bfc42c354557e335b5586b8aae6051c137143fdd2419a04802375587244df0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:41 crc kubenswrapper[4769]: W1125 09:44:41.541670 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda18d0f06_2fff_4e1a_9b11_01eaea85baa1.slice/crio-ced6c00f5644f5e40031c1c9ef92c96cd3190fc1ba545e09d097e60e9b5ed546 WatchSource:0}: Error finding container ced6c00f5644f5e40031c1c9ef92c96cd3190fc1ba545e09d097e60e9b5ed546: Status 404 returned error can't find the container with id ced6c00f5644f5e40031c1c9ef92c96cd3190fc1ba545e09d097e60e9b5ed546 Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.544729 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kzpxc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb3a206d-ee72-415c-bc23-2e1b2d6f8592\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f88b14a3ffb863171ab8bbb4eb89877dfd711fadb03f45a6a337540d34c0a7d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv2k5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:37Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kzpxc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.561813 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27dbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46575153-4800-4ed2-8aa3-b66b98a9c899\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serv
iceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27dbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.582102 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"069c06c6-fe60-41d0-b96d-86606f55b258\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-kfvzs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:41Z 
is after 2025-08-24T17:21:41Z" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.587992 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.588029 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.588042 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.588064 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.588077 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:41Z","lastTransitionTime":"2025-11-25T09:44:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.597523 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbf363f-faf3-45da-a19f-54cc0119825c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:44:25Z\\\",\\\"message\\\":\\\"W1125 09:44:15.399269 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
09:44:15.400911 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063855 cert, and key in /tmp/serving-cert-2102574939/serving-signer.crt, /tmp/serving-cert-2102574939/serving-signer.key\\\\nI1125 09:44:15.634430 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:44:15.637907 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:44:15.638219 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:44:15.639555 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2102574939/tls.crt::/tmp/serving-cert-2102574939/tls.key\\\\\\\"\\\\nF1125 09:44:25.945399 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.612220 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-s47tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"025219f0-bc69-4a33-acaa-b055607272bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8283067913fddd4b3d58c8dc0cc7000d482d929bc6a1d4fb501bfa8c0fa9525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\
"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7rjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-s47tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.626218 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c58a9381-bd89-4fd9-8217-f04646879968\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cd58c1df5cc30ce98fc70b6117deafa5ebeb2d1a7d7b481b62c38ab39c9290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\
":\\\"cri-o://8b504d3d81cc7b0e4741363b8a9e4c0ea3a0c1af959c8b3def222b44a646b22b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fad541e8dbab3802d7acafd3a70a54f992d4578319dc441c41ffdffdd635d63\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.640416 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d58c71b5-5dc4-45c1-9b58-9740a35d2256\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdf114b30bdda719b3e55767f9df9f9e8e3e3e9238fa8403a388d7892922563e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a47750ed21c017ef2cf17c7e8ce0676b8bd6a9fe2669c3d5adae247e557ea71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-98mzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:41Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.691937 4769 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.692027 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.692047 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.692070 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.692081 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:41Z","lastTransitionTime":"2025-11-25T09:44:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.794783 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.794839 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.794852 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.794871 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.794883 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:41Z","lastTransitionTime":"2025-11-25T09:44:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.898916 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.899497 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.899510 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.899530 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:41 crc kubenswrapper[4769]: I1125 09:44:41.899542 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:41Z","lastTransitionTime":"2025-11-25T09:44:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.002286 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.002641 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.002815 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.002998 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.003323 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:42Z","lastTransitionTime":"2025-11-25T09:44:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.106789 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.106857 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.106875 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.106898 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.106914 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:42Z","lastTransitionTime":"2025-11-25T09:44:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.210485 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.210537 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.210547 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.210566 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.210578 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:42Z","lastTransitionTime":"2025-11-25T09:44:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.249756 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kzpxc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb3a206d-ee72-415c-bc23-2e1b2d6f8592\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f88b14a3ffb863171ab8bbb4eb89877dfd711fadb03f45a6a337540d34c0a7d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv2k5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:37Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kzpxc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.266111 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27dbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46575153-4800-4ed2-8aa3-b66b98a9c899\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: 
[routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25
T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\
\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27dbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.288221 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"069c06c6-fe60-41d0-b96d-86606f55b258\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-kfvzs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:42Z 
is after 2025-08-24T17:21:41Z" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.304210 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbf363f-faf3-45da-a19f-54cc0119825c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\
\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:44:25Z\\\",\\\"message\\\":\\\"W1125 09:44:15.399269 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:44:15.400911 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063855 cert, and key in /tmp/serving-cert-2102574939/serving-signer.crt, /tmp/serving-cert-2102574939/serving-signer.key\\\\nI1125 09:44:15.634430 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:44:15.637907 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:44:15.638219 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:44:15.639555 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2102574939/tls.crt::/tmp/serving-cert-2102574939/tls.key\\\\\\\"\\\\nF1125 09:44:25.945399 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.313623 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.313788 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.313825 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.314075 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.314104 4769 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:42Z","lastTransitionTime":"2025-11-25T09:44:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.325650 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89fbb83e7cad0ae1f970669d2cdb9bf9535d5b8e5ccec1962faa377fe263cc17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddfc773720803f36e40054cc984d270f6ff699d660c66491adb65338a52e00e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.340474 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2bfc42c354557e335b5586b8aae6051c137143fdd2419a04802375587244df0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.355112 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c58a9381-bd89-4fd9-8217-f04646879968\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cd58c1df5cc30ce98fc70b6117deafa5ebeb2d1a7d7b481b62c38ab39c9290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8b504d3d81cc7b0e4741363b8a9e4c0ea3a0c1af959c8b3def222b44a646b22b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fad541e8dbab3802d7acafd3a70a54f992d4578319dc441c41ffdffdd635d63\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.368809 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d58c71b5-5dc4-45c1-9b58-9740a35d2256\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdf114b30bdda719b3e55767f9df9f9e8e3e3e9238fa8403a388d7892922563e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a47750ed21c017ef2
cf17c7e8ce0676b8bd6a9fe2669c3d5adae247e557ea71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-98mzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.384025 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-s47tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"025219f0-bc69-4a33-acaa-b055607272bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8283067913fddd4b3d58c8dc0cc7000d482d929bc6a1d4fb501bfa8c0fa9525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni
-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7rjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-s47tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.400803 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e477c71aa0332cf903853803df3a7cfda7c251a749b3f3d9226e0b6edbd34a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.416133 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.418589 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.418625 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.418640 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.418663 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.418678 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:42Z","lastTransitionTime":"2025-11-25T09:44:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.432727 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-v24zk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a18d0f06-2fff-4e1a-9b11-01eaea85baa1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8gdv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-v24zk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.433670 4769 generic.go:334] "Generic (PLEG): container finished" podID="46575153-4800-4ed2-8aa3-b66b98a9c899" containerID="38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192" exitCode=0 Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.433752 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-27dbp" event={"ID":"46575153-4800-4ed2-8aa3-b66b98a9c899","Type":"ContainerDied","Data":"38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192"} Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.437715 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-v24zk" 
event={"ID":"a18d0f06-2fff-4e1a-9b11-01eaea85baa1","Type":"ContainerStarted","Data":"181a23a91023308fd2b334e8113f5e1da7b17d63f0031f4219ebd74213e936ce"} Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.437760 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-v24zk" event={"ID":"a18d0f06-2fff-4e1a-9b11-01eaea85baa1","Type":"ContainerStarted","Data":"ced6c00f5644f5e40031c1c9ef92c96cd3190fc1ba545e09d097e60e9b5ed546"} Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.452829 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.470681 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.491440 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27dbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46575153-4800-4ed2-8aa3-b66b98a9c899\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27dbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.517418 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"069c06c6-fe60-41d0-b96d-86606f55b258\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-kfvzs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:42Z 
is after 2025-08-24T17:21:41Z" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.521167 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.521193 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.521202 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.521218 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.521229 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:42Z","lastTransitionTime":"2025-11-25T09:44:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.536779 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbf363f-faf3-45da-a19f-54cc0119825c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:44:25Z\\\",\\\"message\\\":\\\"W1125 09:44:15.399269 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
09:44:15.400911 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063855 cert, and key in /tmp/serving-cert-2102574939/serving-signer.crt, /tmp/serving-cert-2102574939/serving-signer.key\\\\nI1125 09:44:15.634430 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:44:15.637907 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:44:15.638219 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:44:15.639555 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2102574939/tls.crt::/tmp/serving-cert-2102574939/tls.key\\\\\\\"\\\\nF1125 09:44:25.945399 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.554387 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89fbb83e7cad0ae1f970669d2cdb9bf9535d5b8e5ccec1962faa377fe263cc17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddfc773720803f36e40054cc984d270f6ff699d660c66491adb65338a52e00e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2025-11-25T09:44:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.567103 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2bfc42c354557e335b5586b8aae6051c137143fdd2419a04802375587244df0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.578638 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kzpxc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb3a206d-ee72-415c-bc23-2e1b2d6f8592\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f88b14a3ffb863171ab8bbb4eb89877dfd711fadb03f45a6a337540d34c0a7d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv2k5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:37Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kzpxc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.592041 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c58a9381-bd89-4fd9-8217-f04646879968\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cd58c1df5cc30ce98fc70b6117deafa5ebeb2d1a7d7b481b62c38ab39c9290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8b504d3d81cc7b0e4741363b8a9e4c0ea3a0c1af959c8b3def222b44a646b22b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fad541e8dbab3802d7acafd3a70a54f992d4578319dc441c41ffdffdd635d63\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.603520 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d58c71b5-5dc4-45c1-9b58-9740a35d2256\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdf114b30bdda719b3e55767f9df9f9e8e3e3e9238fa8403a388d7892922563e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a47750ed21c017ef2
cf17c7e8ce0676b8bd6a9fe2669c3d5adae247e557ea71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-98mzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.618304 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-s47tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"025219f0-bc69-4a33-acaa-b055607272bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8283067913fddd4b3d58c8dc0cc7000d482d929bc6a1d4fb501bfa8c0fa9525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni
-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7rjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-s47tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.623464 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.623496 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.623507 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.623524 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.623535 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:42Z","lastTransitionTime":"2025-11-25T09:44:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.634088 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e477c71aa0332cf903853803df3a7cfda7c251a749b3f3d9226e0b6edbd34a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.649851 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.666053 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.680840 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.694604 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-v24zk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a18d0f06-2fff-4e1a-9b11-01eaea85baa1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://181a23a91023308fd2b334e8113f5e1da7b17d63f0031f4219ebd74213e936ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8gdv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-v24zk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.726559 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.726595 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.726605 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.726624 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.726635 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:42Z","lastTransitionTime":"2025-11-25T09:44:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.829596 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.829653 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.829667 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.829686 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.829700 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:42Z","lastTransitionTime":"2025-11-25T09:44:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.932811 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.932856 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.932865 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.932881 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:42 crc kubenswrapper[4769]: I1125 09:44:42.932891 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:42Z","lastTransitionTime":"2025-11-25T09:44:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.036948 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.037295 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.037307 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.037328 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.037344 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:43Z","lastTransitionTime":"2025-11-25T09:44:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.140151 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.140207 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.140224 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.140246 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.140264 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:43Z","lastTransitionTime":"2025-11-25T09:44:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.236408 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.236488 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:44:43 crc kubenswrapper[4769]: E1125 09:44:43.236556 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.236408 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:44:43 crc kubenswrapper[4769]: E1125 09:44:43.236643 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:44:43 crc kubenswrapper[4769]: E1125 09:44:43.236751 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.243397 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.243750 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.243780 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.243801 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.243843 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:43Z","lastTransitionTime":"2025-11-25T09:44:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.347146 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.347192 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.347204 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.347223 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.347238 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:43Z","lastTransitionTime":"2025-11-25T09:44:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.445386 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" event={"ID":"069c06c6-fe60-41d0-b96d-86606f55b258","Type":"ContainerStarted","Data":"010a6e6fa299b12043210d61b4ac16d7d1f4d948888065c8a9e723060e2be2b6"} Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.449472 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.449508 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.449518 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.449534 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.449548 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:43Z","lastTransitionTime":"2025-11-25T09:44:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.455653 4769 generic.go:334] "Generic (PLEG): container finished" podID="46575153-4800-4ed2-8aa3-b66b98a9c899" containerID="0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296" exitCode=0 Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.455707 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-27dbp" event={"ID":"46575153-4800-4ed2-8aa3-b66b98a9c899","Type":"ContainerDied","Data":"0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296"} Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.472957 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c58a9381-bd89-4fd9-8217-f04646879968\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cd58c1df5cc30ce98fc70b6117deafa5ebeb2d1a7d7b481b62c38ab39c9290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8b504d3d81cc7b0e4741363b8a9e4c0ea3a0c1af959c8b3def222b44a646b22b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a57
8bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fad541e8dbab3802d7acafd3a70a54f992d4578319dc441c41ffdffdd635d63\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:43Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.500931 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d58c71b5-5dc4-45c1-9b58-9740a35d2256\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdf114b30bdda719b3e55767f9df9f9e8e3e3e9238fa8403a388d7892922563e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a47750ed21c017ef2cf17c7e8ce0676b8bd6a9fe2669c3d5adae247e557ea71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-98mzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:43Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.517908 4769 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-s47tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"025219f0-bc69-4a33-acaa-b055607272bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8283067913fddd4b3d58c8dc0cc7000d482d929bc6a1d4fb501bfa8c0fa9525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7rjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-s47tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:43Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.534657 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e477c71aa0332cf903853803df3a7cfda7c251a749b3f3d9226e0b6edbd34a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:43Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.548425 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:43Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.553201 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.553265 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.553283 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.553313 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.553329 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:43Z","lastTransitionTime":"2025-11-25T09:44:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.561690 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-v24zk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a18d0f06-2fff-4e1a-9b11-01eaea85baa1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://181a23a91023308fd2b334e8113f5e1da7b17d63f0031f4219ebd74213e936ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8gdv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-v24zk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:43Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.578170 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:43Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.592129 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:43Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.604016 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kzpxc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb3a206d-ee72-415c-bc23-2e1b2d6f8592\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f88b14a3ffb863171ab8bbb4eb89877dfd711fadb03f45a6a337540d34c0a7d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv2k5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:37Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kzpxc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T09:44:43Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.619407 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27dbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46575153-4800-4ed2-8aa3-b66b98a9c899\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube
-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:41Z\\\
",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27dbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:43Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.662614 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.662665 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.662679 
4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.662731 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.662744 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:43Z","lastTransitionTime":"2025-11-25T09:44:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.674536 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"069c06c6-fe60-41d0-b96d-86606f55b258\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-kfvzs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:43Z 
is after 2025-08-24T17:21:41Z" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.703616 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbf363f-faf3-45da-a19f-54cc0119825c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\
\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:44:25Z\\\",\\\"message\\\":\\\"W1125 09:44:15.399269 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:44:15.400911 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063855 cert, and key in /tmp/serving-cert-2102574939/serving-signer.crt, /tmp/serving-cert-2102574939/serving-signer.key\\\\nI1125 09:44:15.634430 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:44:15.637907 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:44:15.638219 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:44:15.639555 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2102574939/tls.crt::/tmp/serving-cert-2102574939/tls.key\\\\\\\"\\\\nF1125 09:44:25.945399 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:43Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.717846 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89fbb83e7cad0ae1f970669d2cdb9bf9535d5b8e5ccec1962faa377fe263cc17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddfc773720803f36e40054cc984d270f6ff699d660c66491adb65338a52e00e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:43Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.733023 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2bfc42c354557e335b5586b8aae6051c137143fdd2419a04802375587244df0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:43Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.766064 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.766114 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.766124 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.766145 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.766160 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:43Z","lastTransitionTime":"2025-11-25T09:44:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.869145 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.869226 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.869246 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.869276 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.869298 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:43Z","lastTransitionTime":"2025-11-25T09:44:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.972155 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.972223 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.972242 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.972267 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:43 crc kubenswrapper[4769]: I1125 09:44:43.972284 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:43Z","lastTransitionTime":"2025-11-25T09:44:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.075684 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.075745 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.075759 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.075783 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.075801 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:44Z","lastTransitionTime":"2025-11-25T09:44:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.178884 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.178948 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.178978 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.179000 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.179018 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:44Z","lastTransitionTime":"2025-11-25T09:44:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.281298 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.281375 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.281395 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.281423 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.281445 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:44Z","lastTransitionTime":"2025-11-25T09:44:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.384571 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.384644 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.384665 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.384692 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.384710 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:44Z","lastTransitionTime":"2025-11-25T09:44:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.486331 4769 generic.go:334] "Generic (PLEG): container finished" podID="46575153-4800-4ed2-8aa3-b66b98a9c899" containerID="937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d" exitCode=0 Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.486398 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-27dbp" event={"ID":"46575153-4800-4ed2-8aa3-b66b98a9c899","Type":"ContainerDied","Data":"937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d"} Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.486756 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.486812 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.486830 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.486859 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.486879 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:44Z","lastTransitionTime":"2025-11-25T09:44:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.503753 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:44Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.518518 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:44Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.536021 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-v24zk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a18d0f06-2fff-4e1a-9b11-01eaea85baa1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://181a23a91023308fd2b334e8113f5e1da7b17d63f0031f4219ebd74213e936ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8gdv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-v24zk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:44Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.557115 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89fbb83e7cad0ae1f970669d2cdb9bf9535d5b8e5ccec1962faa377fe263cc17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddfc773720803f36e40054cc984d270f6ff699d660c66491adb65338a52e00e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:44Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.575861 4769 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2bfc42c354557e335b5586b8aae6051c137143fdd2419a04802375587244df0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:44Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.589753 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kzpxc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb3a206d-ee72-415c-bc23-2e1b2d6f8592\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f88b14a3ffb863171ab8bbb4eb89877dfd711fadb03f45a6a337540d34c0a7d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv2k5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:37Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kzpxc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:44Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.590195 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.590295 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.590309 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.590339 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.590354 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:44Z","lastTransitionTime":"2025-11-25T09:44:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.606596 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27dbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46575153-4800-4ed2-8aa3-b66b98a9c899\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]},{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},
\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27dbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:44Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.626318 
4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"069c06c6-fe60-41d0-b96d-86606f55b258\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{
\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-d
ev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd
47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-kfvzs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:44Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.641919 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbf363f-faf3-45da-a19f-54cc0119825c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:44:25Z\\\",\\\"message\\\":\\\"W1125 09:44:15.399269 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
09:44:15.400911 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063855 cert, and key in /tmp/serving-cert-2102574939/serving-signer.crt, /tmp/serving-cert-2102574939/serving-signer.key\\\\nI1125 09:44:15.634430 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:44:15.637907 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:44:15.638219 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:44:15.639555 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2102574939/tls.crt::/tmp/serving-cert-2102574939/tls.key\\\\\\\"\\\\nF1125 09:44:25.945399 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:44Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.657667 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-s47tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"025219f0-bc69-4a33-acaa-b055607272bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8283067913fddd4b3d58c8dc0cc7000d482d929bc6a1d4fb501bfa8c0fa9525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\
"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7rjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-s47tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:44Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.672217 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c58a9381-bd89-4fd9-8217-f04646879968\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cd58c1df5cc30ce98fc70b6117deafa5ebeb2d1a7d7b481b62c38ab39c9290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\
":\\\"cri-o://8b504d3d81cc7b0e4741363b8a9e4c0ea3a0c1af959c8b3def222b44a646b22b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fad541e8dbab3802d7acafd3a70a54f992d4578319dc441c41ffdffdd635d63\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:44Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.687217 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d58c71b5-5dc4-45c1-9b58-9740a35d2256\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdf114b30bdda719b3e55767f9df9f9e8e3e3e9238fa8403a388d7892922563e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a47750ed21c017ef2cf17c7e8ce0676b8bd6a9fe2669c3d5adae247e557ea71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-98mzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:44Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.693399 4769 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.693691 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.693826 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.693921 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.694054 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:44Z","lastTransitionTime":"2025-11-25T09:44:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.704086 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e477c71aa0332cf903853803df3a7cfda7c251a749b3f3d9226e0b6edbd34a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:44Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.721232 4769 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:44Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.798687 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.798749 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.798764 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.798788 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.798802 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:44Z","lastTransitionTime":"2025-11-25T09:44:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in 
Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.902637 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.902686 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.902698 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.902724 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:44:44 crc kubenswrapper[4769]: I1125 09:44:44.902736 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:44Z","lastTransitionTime":"2025-11-25T09:44:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.005673 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.005732 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.005745 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.005768 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.005782 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:45Z","lastTransitionTime":"2025-11-25T09:44:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Has your network provider started?"} Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.109342 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.109392 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.109405 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.109428 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.109441 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:45Z","lastTransitionTime":"2025-11-25T09:44:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.212610 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.212668 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.212687 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.212712 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.212727 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:45Z","lastTransitionTime":"2025-11-25T09:44:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.236775 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.236807 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.236881 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:44:45 crc kubenswrapper[4769]: E1125 09:44:45.236954 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:44:45 crc kubenswrapper[4769]: E1125 09:44:45.237183 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:44:45 crc kubenswrapper[4769]: E1125 09:44:45.237291 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.315721 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.315778 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.315822 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.315846 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.315859 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:45Z","lastTransitionTime":"2025-11-25T09:44:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.420857 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.420905 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.420917 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.420938 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.420952 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:45Z","lastTransitionTime":"2025-11-25T09:44:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.498325 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" event={"ID":"069c06c6-fe60-41d0-b96d-86606f55b258","Type":"ContainerStarted","Data":"7a0c37bc8a4086c4243e9cbb3dae0806cacc52cf8300fbdde90a59e01f5b7cc9"} Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.498856 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.504997 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-27dbp" event={"ID":"46575153-4800-4ed2-8aa3-b66b98a9c899","Type":"ContainerStarted","Data":"3ed3aea46c79a1ec2872cceee652c70923be3656e45414fecb60f293639c81c1"} Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.516494 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c58a9381-bd89-4fd9-8217-f04646879968\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cd58c1df5cc30ce98fc70b6117deafa5ebeb2d1a7d7b481b62c38ab39c9290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-c
erts\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8b504d3d81cc7b0e4741363b8a9e4c0ea3a0c1af959c8b3def222b44a646b22b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fad541e8dbab3802d7acafd3a70a54f992d4578319dc441c41ffdffdd635d63\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.524122 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.524198 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.524217 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.524246 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.524264 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:45Z","lastTransitionTime":"2025-11-25T09:44:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI 
configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.534717 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d58c71b5-5dc4-45c1-9b58-9740a35d2256\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdf114b30bdda719b3e55767f9df9f9e8e3e3e9238fa8403a388d7892922563e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a47750ed21c017ef2cf17c7e8ce0676b8bd6a9fe2669c3d5adae247e557ea71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-98mzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.550993 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.553013 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-s47tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"025219f0-bc69-4a33-acaa-b055607272bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8283067913fddd4b3d58c8dc0cc7000d482d929bc6a1d4fb501bfa8c0fa9525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\
\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7rjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-s47tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.569820 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e477c71aa0332cf903853803df3a7cfda7c251a749b3f3d9226e0b6edbd34a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.586148 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.601165 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.621623 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:45Z is after 2025-08-24T17:21:41Z"
Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.627008 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.627060 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.627076 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.627102 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.627118 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:45Z","lastTransitionTime":"2025-11-25T09:44:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.635791 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-v24zk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a18d0f06-2fff-4e1a-9b11-01eaea85baa1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://181a23a91023308fd2b334e8113f5e1da7b17d63f0031f4219ebd74213e936ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8gdv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-v24zk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.660221 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"069c06c6-fe60-41d0-b96d-86606f55b258\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82259eae43bbd7b738e4f7fef449b797efc4d557417eb01b61a9ff1e6d59d07b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41ce53b7a0ecd75b9205774e514ba01c6852f939cff8e96fa49105f4e600702\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0893245e50828fc44a5a9f575584f675dfd48a67e31fb78734b575e30af769ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\
\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79aa5dc74b62d0d36d744a8603484ce1a5199bba790a406ffa70f76b70592e51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7459078ef54cd1bb3171908596cdc66735a96463eafbc5a0ca21e644cd934e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab25f2ae721ae8616e5ef712256055c8f8a353d898a7f2711938634d2d9fbbe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\
"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a0c37bc8a4086c4243e9cbb3dae0806cacc52cf8300fbdde90a59e01f5b7cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recurs
iveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://010a6e6fa299b12043210d61b4ac16d7d1f4d948888065c8a9e723060e2be2b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-kfvzs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.677501 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbf363f-faf3-45da-a19f-54cc0119825c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:44:25Z\\\",\\\"message\\\":\\\"W1125 09:44:15.399269 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:44:15.400911 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063855 cert, and key in /tmp/serving-cert-2102574939/serving-signer.crt, /tmp/serving-cert-2102574939/serving-signer.key\\\\nI1125 09:44:15.634430 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:44:15.637907 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:44:15.638219 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:44:15.639555 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2102574939/tls.crt::/tmp/serving-cert-2102574939/tls.key\\\\\\\"\\\\nF1125 09:44:25.945399 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.695685 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89fbb83e7cad0ae1f970669d2cdb9bf9535d5b8e5ccec1962faa377fe263cc17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddfc773720803f36e40054cc984d270f6ff699d660c66491adb65338a52e00e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.713058 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2bfc42c354557e335b5586b8aae6051c137143fdd2419a04802375587244df0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.719212 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.726867 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kzpxc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb3a206d-ee72-415c-bc23-2e1b2d6f8592\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f88b14a3ffb863171ab8bbb4eb89877dfd711fadb03f45a6a337540d34c0a7d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv2k5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:37Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kzpxc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.730149 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.730214 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.730234 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.730262 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.730277 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:45Z","lastTransitionTime":"2025-11-25T09:44:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.747416 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27dbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46575153-4800-4ed2-8aa3-b66b98a9c899\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]},{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},
\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27dbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.763858 
4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.776921 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.787945 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-v24zk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a18d0f06-2fff-4e1a-9b11-01eaea85baa1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://181a23a91023308fd2b334e8113f5e1da7b17d63f0031f4219ebd74213e936ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8gdv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126
.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-v24zk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.799455 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89fbb83e7cad0ae1f970669d2cdb9bf9535d5b8e5ccec1962faa377fe263cc17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddfc773720803f36e40054cc984d270f6ff699d660c66491adb65338a52e00e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.812388 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2bfc42c354557e335b5586b8aae6051c137143fdd2419a04802375587244df0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.825373 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kzpxc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb3a206d-ee72-415c-bc23-2e1b2d6f8592\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f88b14a3ffb863171ab8bbb4eb89877dfd711fadb03f45a6a337540d34c0a7d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv2k5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:37Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kzpxc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.833093 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.833147 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.833161 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.833181 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.833197 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:45Z","lastTransitionTime":"2025-11-25T09:44:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.841441 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27dbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46575153-4800-4ed2-8aa3-b66b98a9c899\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ed3aea46c79a1ec2872cceee652c70923be3656e45414fecb60f293639c81c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://42278a63
f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/e
ntrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27dbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.868587 4769 
status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"069c06c6-fe60-41d0-b96d-86606f55b258\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82259eae43bbd7b738e4f7fef449b797efc4d557417eb01b61a9ff1e6d59d07b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41ce53b7a0ecd75b9205774e514ba01c6852f939cff8e96fa49105f4e600702\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0893245e50828fc44a5a9f575584f675dfd48a67e31fb78734b575e30af769ae\\\",\\\"image\\\
":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79aa5dc74b62d0d36d744a8603484ce1a5199bba790a406ffa70f76b70592e51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7459078ef54cd1bb3171908596cdc66735a96463eafbc5a0ca21e644cd934e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab25f2ae721ae8616e5ef712256055c8f8a353d898a7f2711938634d2d9fbbe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b
17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a0c37bc8a4086c4243e9cbb3dae0806cacc52cf8300fbdde90a59e01f5b7cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"
/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://010a6e6fa299b12043210d61b4ac16d7d1f4d948888065c8a9e723060e2be2b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-kfvzs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.885489 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbf363f-faf3-45da-a19f-54cc0119825c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:44:25Z\\\",\\\"message\\\":\\\"W1125 09:44:15.399269 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:44:15.400911 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063855 cert, and key in /tmp/serving-cert-2102574939/serving-signer.crt, /tmp/serving-cert-2102574939/serving-signer.key\\\\nI1125 09:44:15.634430 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:44:15.637907 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:44:15.638219 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:44:15.639555 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2102574939/tls.crt::/tmp/serving-cert-2102574939/tls.key\\\\\\\"\\\\nF1125 09:44:25.945399 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.906179 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-s47tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"025219f0-bc69-4a33-acaa-b055607272bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8283067913fddd4b3d58c8dc0cc7000d482d929bc6a1d4fb501bfa8c0fa9525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/k
ubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7rjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-s47tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.922528 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c58a9381-bd89-4fd9-8217-f04646879968\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cd58c1df5cc30ce98fc70b6117deafa5ebeb2d1a7d7b481b62c38ab39c9290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-rel
ease-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8b504d3d81cc7b0e4741363b8a9e4c0ea3a0c1af959c8b3def222b44a646b22b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fad541e8dbab3802d7acafd3a70a54f992d4578319dc441c41ffdffdd635d63\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.936115 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.936177 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.936189 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.936209 4769 kubelet_node_status.go:724] "Recording 
event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.936222 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:45Z","lastTransitionTime":"2025-11-25T09:44:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.940153 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d58c71b5-5dc4-45c1-9b58-9740a35d2256\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdf114b30bdda719b3e55767f9df9f9e8e3e3e9238fa8403a388d7892922563e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a47750ed21c017ef2cf17c7e8ce0676b8bd6a9fe2669c3d5adae247e557ea71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-98mzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.949452 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.957669 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e477c71aa0332cf903853803df3a7cfda7c251a749b3f3d9226e0b6edbd34a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:45 crc kubenswrapper[4769]: I1125 09:44:45.974114 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:45Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.038994 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.039091 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.039106 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.039133 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.039155 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:46Z","lastTransitionTime":"2025-11-25T09:44:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.143065 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.143133 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.143146 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.143170 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.143186 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:46Z","lastTransitionTime":"2025-11-25T09:44:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.246025 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.246086 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.246101 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.246124 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.246143 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:46Z","lastTransitionTime":"2025-11-25T09:44:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.349737 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.349807 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.349824 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.349849 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.349863 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:46Z","lastTransitionTime":"2025-11-25T09:44:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.453033 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.453127 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.453154 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.453190 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.453215 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:46Z","lastTransitionTime":"2025-11-25T09:44:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.509415 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs"
Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.536990 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs"
Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.552452 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d58c71b5-5dc4-45c1-9b58-9740a35d2256\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdf114b30bdda719b3e55767f9df9f9e8e3e3e9238fa8403a388d7892922563e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a47750ed21c017ef2cf17c7e8ce0676b8bd6a9fe2669c3d5adae247e557ea71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-98mzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:46Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.556101 4769 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.556151 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.556164 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.556184 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.556194 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:46Z","lastTransitionTime":"2025-11-25T09:44:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.568790 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-s47tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"025219f0-bc69-4a33-acaa-b055607272bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8283067913fddd4b3d58c8dc0cc7000d482d929bc6a1d4fb501bfa8c0fa9525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin
\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7rjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-s47tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:46Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.583355 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c58a9381-bd89-4fd9-8217-f04646879968\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cd58c1df5cc30ce98fc70b6117deafa5ebeb2d1a7d7b481b62c38ab39c9290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8b504d3d81cc7b0e4741363b8a9e4c0ea3a0c1af959c8b3def222b44a646b22b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fad541e8dbab3802d7acafd3a70a54f992d4578319dc441c41ffdffdd635d63\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:46Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.597319 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:46Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.612999 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e477c71aa0332cf903853803df3a7cfda7c251a749b3f3d9226e0b6edbd34a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:46Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.626618 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:46Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.641077 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:46Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.654378 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-v24zk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a18d0f06-2fff-4e1a-9b11-01eaea85baa1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://181a23a91023308fd2b334e8113f5e1da7b17d63f0031f4219ebd74213e936ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8gdv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-v24zk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:46Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.664787 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.664845 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.664858 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.664877 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.664893 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:46Z","lastTransitionTime":"2025-11-25T09:44:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.677832 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89fbb83e7cad0ae1f970669d2cdb9bf9535d5b8e5ccec1962faa377fe263cc17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddfc773720803f36e40054cc984d270f6ff699d660c66491adb65338a52e00e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastSta
te\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:46Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.691548 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2bfc42c354557e335b5586b8aae6051c137143fdd2419a04802375587244df0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:46Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.704812 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kzpxc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb3a206d-ee72-415c-bc23-2e1b2d6f8592\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f88b14a3ffb863171ab8bbb4eb89877dfd711fadb03f45a6a337540d34c0a7d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv2k5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:37Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kzpxc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:46Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.721127 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27dbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46575153-4800-4ed2-8aa3-b66b98a9c899\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ed3aea46c79a1ec2872cceee652c70923be3656e45414fecb60f293639c81c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27dbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:46Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.741669 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"069c06c6-fe60-41d0-b96d-86606f55b258\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82259eae43bbd7b738e4f7fef449b797efc4d557417eb01b61a9ff1e6d59d07b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41ce53b7a0ecd75b9205774e514ba01c6852f939cff8e96fa49105f4e600702\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0893245e50828fc44a5a9f575584f675dfd48a67e31fb78734b575e30af769ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79aa5dc74b62d0d36d744a8603484ce1a5199bba790a406ffa70f76b70592e51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7459078ef54cd1bb3171908596cdc66735a96463eafbc5a0ca21e644cd934e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab25f2ae721ae8616e5ef712256055c8f8a353d898a7f2711938634d2d9fbbe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a0c37bc8a4086c4243e9cbb3dae0806cacc52cf8300fbdde90a59e01f5b7cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://010a6e6fa299b12043210d61b4ac16d7d1f4d948888065c8a9e723060e2be2b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-kfvzs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:46Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.756246 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbf363f-faf3-45da-a19f-54cc0119825c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:44:25Z\\\",\\\"message\\\":\\\"W1125 09:44:15.399269 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:44:15.400911 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063855 cert, and key in /tmp/serving-cert-2102574939/serving-signer.crt, /tmp/serving-cert-2102574939/serving-signer.key\\\\nI1125 09:44:15.634430 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:44:15.637907 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:44:15.638219 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:44:15.639555 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2102574939/tls.crt::/tmp/serving-cert-2102574939/tls.key\\\\\\\"\\\\nF1125 09:44:25.945399 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:46Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.767602 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.767660 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.767675 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.767695 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.767707 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:46Z","lastTransitionTime":"2025-11-25T09:44:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.820853 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:44:46 crc kubenswrapper[4769]: E1125 09:44:46.821265 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 25 09:44:46 crc kubenswrapper[4769]: E1125 09:44:46.821322 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 25 09:44:46 crc kubenswrapper[4769]: E1125 09:44:46.821344 4769 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 09:44:46 crc kubenswrapper[4769]: E1125 09:44:46.821472 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 09:45:02.821403731 +0000 UTC m=+51.406376044 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.871057 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.871118 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.871135 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.871161 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.871179 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:46Z","lastTransitionTime":"2025-11-25T09:44:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.922321 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.922444 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:44:46 crc kubenswrapper[4769]: E1125 09:44:46.922501 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:45:02.922479174 +0000 UTC m=+51.507451497 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.922542 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:44:46 crc kubenswrapper[4769]: E1125 09:44:46.922568 4769 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.922592 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:44:46 crc kubenswrapper[4769]: E1125 09:44:46.922631 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:45:02.922619958 +0000 UTC m=+51.507592271 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 25 09:44:46 crc kubenswrapper[4769]: E1125 09:44:46.922698 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 25 09:44:46 crc kubenswrapper[4769]: E1125 09:44:46.922715 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 25 09:44:46 crc kubenswrapper[4769]: E1125 09:44:46.922728 4769 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 09:44:46 crc kubenswrapper[4769]: E1125 09:44:46.922743 4769 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Nov 25 09:44:46 crc kubenswrapper[4769]: E1125 09:44:46.922771 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 09:45:02.922760512 +0000 UTC m=+51.507732835 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 09:44:46 crc kubenswrapper[4769]: E1125 09:44:46.922903 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:45:02.922878995 +0000 UTC m=+51.507851308 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.973454 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.973514 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.973528 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.973551 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:44:46 crc kubenswrapper[4769]: I1125 09:44:46.973569 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:46Z","lastTransitionTime":"2025-11-25T09:44:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.076346 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.076400 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.076417 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.076438 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.076454 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:47Z","lastTransitionTime":"2025-11-25T09:44:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.179136 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.179179 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.179192 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.179209 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.179220 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:47Z","lastTransitionTime":"2025-11-25T09:44:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.236414 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.236616 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:44:47 crc kubenswrapper[4769]: E1125 09:44:47.236809 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.236898 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:44:47 crc kubenswrapper[4769]: E1125 09:44:47.237149 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:44:47 crc kubenswrapper[4769]: E1125 09:44:47.237350 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.282688 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.282730 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.282741 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.282759 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.282773 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:47Z","lastTransitionTime":"2025-11-25T09:44:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.385509 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.385578 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.385595 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.385619 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.385636 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:47Z","lastTransitionTime":"2025-11-25T09:44:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.487859 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.487920 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.487932 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.487990 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.488006 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:47Z","lastTransitionTime":"2025-11-25T09:44:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.590300 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.590346 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.590357 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.590374 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.590389 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:47Z","lastTransitionTime":"2025-11-25T09:44:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.698987 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.699065 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.699080 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.699099 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.699111 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:47Z","lastTransitionTime":"2025-11-25T09:44:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.801421 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.801492 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.801503 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.801521 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.801534 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:47Z","lastTransitionTime":"2025-11-25T09:44:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.855098 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.855182 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.855195 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.855220 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.855233 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:47Z","lastTransitionTime":"2025-11-25T09:44:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:47 crc kubenswrapper[4769]: E1125 09:44:47.872294 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:47Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:47Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2e5db0ee-b4dd-41e7-b399-6918209aec97\\\",\\\"systemUUID\\\":\\\"fb8d0e0c-b27f-49f2-81a7-fe75c397959e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:47Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.878206 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.878246 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.878258 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.878278 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.878295 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:47Z","lastTransitionTime":"2025-11-25T09:44:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:47 crc kubenswrapper[4769]: E1125 09:44:47.896171 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:47Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:47Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2e5db0ee-b4dd-41e7-b399-6918209aec97\\\",\\\"systemUUID\\\":\\\"fb8d0e0c-b27f-49f2-81a7-fe75c397959e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:47Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.908267 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.908337 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.908352 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.908387 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.908431 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:47Z","lastTransitionTime":"2025-11-25T09:44:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:47 crc kubenswrapper[4769]: E1125 09:44:47.926866 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:47Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:47Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2e5db0ee-b4dd-41e7-b399-6918209aec97\\\",\\\"systemUUID\\\":\\\"fb8d0e0c-b27f-49f2-81a7-fe75c397959e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:47Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.931770 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.931823 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.931835 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.931855 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.931868 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:47Z","lastTransitionTime":"2025-11-25T09:44:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:47 crc kubenswrapper[4769]: E1125 09:44:47.953007 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:47Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:47Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2e5db0ee-b4dd-41e7-b399-6918209aec97\\\",\\\"systemUUID\\\":\\\"fb8d0e0c-b27f-49f2-81a7-fe75c397959e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:47Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.957522 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.957569 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.957596 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.957613 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.957625 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:47Z","lastTransitionTime":"2025-11-25T09:44:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:47 crc kubenswrapper[4769]: E1125 09:44:47.972024 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:47Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:47Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:47Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:47Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2e5db0ee-b4dd-41e7-b399-6918209aec97\\\",\\\"systemUUID\\\":\\\"fb8d0e0c-b27f-49f2-81a7-fe75c397959e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:47Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:47 crc kubenswrapper[4769]: E1125 09:44:47.972191 4769 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.974002 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.974049 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.974060 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.974077 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:47 crc kubenswrapper[4769]: I1125 09:44:47.974087 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:47Z","lastTransitionTime":"2025-11-25T09:44:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:48 crc kubenswrapper[4769]: I1125 09:44:48.076843 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:48 crc kubenswrapper[4769]: I1125 09:44:48.076905 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:48 crc kubenswrapper[4769]: I1125 09:44:48.076917 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:48 crc kubenswrapper[4769]: I1125 09:44:48.076935 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:48 crc kubenswrapper[4769]: I1125 09:44:48.076948 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:48Z","lastTransitionTime":"2025-11-25T09:44:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:48 crc kubenswrapper[4769]: I1125 09:44:48.179490 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:48 crc kubenswrapper[4769]: I1125 09:44:48.179540 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:48 crc kubenswrapper[4769]: I1125 09:44:48.179552 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:48 crc kubenswrapper[4769]: I1125 09:44:48.179572 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:48 crc kubenswrapper[4769]: I1125 09:44:48.179588 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:48Z","lastTransitionTime":"2025-11-25T09:44:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:48 crc kubenswrapper[4769]: I1125 09:44:48.282362 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:48 crc kubenswrapper[4769]: I1125 09:44:48.282412 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:48 crc kubenswrapper[4769]: I1125 09:44:48.282425 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:48 crc kubenswrapper[4769]: I1125 09:44:48.282442 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:48 crc kubenswrapper[4769]: I1125 09:44:48.282452 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:48Z","lastTransitionTime":"2025-11-25T09:44:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:48 crc kubenswrapper[4769]: I1125 09:44:48.385041 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:48 crc kubenswrapper[4769]: I1125 09:44:48.385109 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:48 crc kubenswrapper[4769]: I1125 09:44:48.385125 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:48 crc kubenswrapper[4769]: I1125 09:44:48.385160 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:48 crc kubenswrapper[4769]: I1125 09:44:48.385177 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:48Z","lastTransitionTime":"2025-11-25T09:44:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:48 crc kubenswrapper[4769]: I1125 09:44:48.488079 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:48 crc kubenswrapper[4769]: I1125 09:44:48.488401 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:48 crc kubenswrapper[4769]: I1125 09:44:48.488412 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:48 crc kubenswrapper[4769]: I1125 09:44:48.488427 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:48 crc kubenswrapper[4769]: I1125 09:44:48.488438 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:48Z","lastTransitionTime":"2025-11-25T09:44:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:48 crc kubenswrapper[4769]: I1125 09:44:48.591139 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:48 crc kubenswrapper[4769]: I1125 09:44:48.591182 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:48 crc kubenswrapper[4769]: I1125 09:44:48.591193 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:48 crc kubenswrapper[4769]: I1125 09:44:48.591208 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:48 crc kubenswrapper[4769]: I1125 09:44:48.591222 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:48Z","lastTransitionTime":"2025-11-25T09:44:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:48 crc kubenswrapper[4769]: I1125 09:44:48.694101 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:48 crc kubenswrapper[4769]: I1125 09:44:48.694160 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:48 crc kubenswrapper[4769]: I1125 09:44:48.694171 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:48 crc kubenswrapper[4769]: I1125 09:44:48.694191 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:48 crc kubenswrapper[4769]: I1125 09:44:48.694203 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:48Z","lastTransitionTime":"2025-11-25T09:44:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:48 crc kubenswrapper[4769]: I1125 09:44:48.798327 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:48 crc kubenswrapper[4769]: I1125 09:44:48.798392 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:48 crc kubenswrapper[4769]: I1125 09:44:48.798406 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:48 crc kubenswrapper[4769]: I1125 09:44:48.798431 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:48 crc kubenswrapper[4769]: I1125 09:44:48.798449 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:48Z","lastTransitionTime":"2025-11-25T09:44:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:48 crc kubenswrapper[4769]: I1125 09:44:48.901061 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:48 crc kubenswrapper[4769]: I1125 09:44:48.901115 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:48 crc kubenswrapper[4769]: I1125 09:44:48.901127 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:48 crc kubenswrapper[4769]: I1125 09:44:48.901150 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:48 crc kubenswrapper[4769]: I1125 09:44:48.901164 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:48Z","lastTransitionTime":"2025-11-25T09:44:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.004734 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.004812 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.004825 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.004849 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.004864 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:49Z","lastTransitionTime":"2025-11-25T09:44:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.107471 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.107513 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.107525 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.107544 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.107556 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:49Z","lastTransitionTime":"2025-11-25T09:44:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.210133 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.210195 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.210206 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.210225 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.210238 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:49Z","lastTransitionTime":"2025-11-25T09:44:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.236517 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.236601 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.236634 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:44:49 crc kubenswrapper[4769]: E1125 09:44:49.236726 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:44:49 crc kubenswrapper[4769]: E1125 09:44:49.236852 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 09:44:49 crc kubenswrapper[4769]: E1125 09:44:49.236936 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.313068 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.313131 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.313147 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.313174 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.313194 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:49Z","lastTransitionTime":"2025-11-25T09:44:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.416422 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.416483 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.416500 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.416526 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.416546 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:49Z","lastTransitionTime":"2025-11-25T09:44:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.518036 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.518067 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.518074 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.518086 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.518096 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:49Z","lastTransitionTime":"2025-11-25T09:44:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.522776 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-kfvzs_069c06c6-fe60-41d0-b96d-86606f55b258/ovnkube-controller/0.log"
Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.525062 4769 generic.go:334] "Generic (PLEG): container finished" podID="069c06c6-fe60-41d0-b96d-86606f55b258" containerID="7a0c37bc8a4086c4243e9cbb3dae0806cacc52cf8300fbdde90a59e01f5b7cc9" exitCode=1
Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.525105 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" event={"ID":"069c06c6-fe60-41d0-b96d-86606f55b258","Type":"ContainerDied","Data":"7a0c37bc8a4086c4243e9cbb3dae0806cacc52cf8300fbdde90a59e01f5b7cc9"}
Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.525698 4769 scope.go:117] "RemoveContainer" containerID="7a0c37bc8a4086c4243e9cbb3dae0806cacc52cf8300fbdde90a59e01f5b7cc9"
Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.540625 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c58a9381-bd89-4fd9-8217-f04646879968\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cd58c1df5cc30ce98fc70b6117deafa5ebeb2d1a7d7b481b62c38ab39c9290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8b504d3d81cc7b0e4741363b8a9e4c0ea3a0c1af959c8b3def222b44a646b22b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fad541e8dbab3802d7acafd3a70a54f992d4578319dc441c41ffdffdd635d63\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:49Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.558178 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d58c71b5-5dc4-45c1-9b58-9740a35d2256\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdf114b30bdda719b3e55767f9df9f9e8e3e3e9238fa8403a388d7892922563e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a47750ed21c017ef2
cf17c7e8ce0676b8bd6a9fe2669c3d5adae247e557ea71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-98mzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:49Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.575009 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-s47tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"025219f0-bc69-4a33-acaa-b055607272bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8283067913fddd4b3d58c8dc0cc7000d482d929bc6a1d4fb501bfa8c0fa9525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni
-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7rjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-s47tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:49Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.589572 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e477c71aa0332cf903853803df3a7cfda7c251a749b3f3d9226e0b6edbd34a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:49Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.603784 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:49Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.615992 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:49Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.620834 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.620866 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.620878 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.620895 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.620908 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:49Z","lastTransitionTime":"2025-11-25T09:44:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.626192 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-v24zk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a18d0f06-2fff-4e1a-9b11-01eaea85baa1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://181a23a91023308fd2b334e8113f5e1da7b17d63f0031f4219ebd74213e936ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8gdv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-v24zk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:49Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.639726 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:49Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.652909 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2bfc42c354557e335b5586b8aae6051c137143fdd2419a04802375587244df0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:49Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.664758 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kzpxc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb3a206d-ee72-415c-bc23-2e1b2d6f8592\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f88b14a3ffb863171ab8bbb4eb89877dfd711fadb03f45a6a337540d34c0a7d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv2k5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:37Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kzpxc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:49Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.682729 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27dbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46575153-4800-4ed2-8aa3-b66b98a9c899\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ed3aea46c79a1ec2872cceee652c70923be3656e45414fecb60f293639c81c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27dbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:49Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.703974 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"069c06c6-fe60-41d0-b96d-86606f55b258\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82259eae43bbd7b738e4f7fef449b797efc4d557417eb01b61a9ff1e6d59d07b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41ce53b7a0ecd75b9205774e514ba01c6852f939cff8e96fa49105f4e600702\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0893245e50828fc44a5a9f575584f675dfd48a67e31fb78734b575e30af769ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79aa5dc74b62d0d36d744a8603484ce1a5199bba790a406ffa70f76b70592e51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7459078ef54cd1bb3171908596cdc66735a96463eafbc5a0ca21e644cd934e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab25f2ae721ae8616e5ef712256055c8f8a353d898a7f2711938634d2d9fbbe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a0c37bc8a4086c4243e9cbb3dae0806cacc52cf8300fbdde90a59e01f5b7cc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a0c37bc8a4086c4243e9cbb3dae0806cacc52cf8300fbdde90a59e01f5b7cc9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:44:48Z\\\",\\\"message\\\":\\\"11] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 09:44:48.625906 6069 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 09:44:48.626044 6069 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 09:44:48.626215 6069 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 09:44:48.626415 6069 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 09:44:48.626665 6069 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:44:48.627039 6069 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:44:48.627448 6069 reflector.go:311] Stopping reflector *v1.Namespace (0s) from 
k8s.io/client-go/informers/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://010a6e6fa299b12043210d61b4ac16d7d1f4d948888065c8a9e723060e2be2b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@
sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-kfvzs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:49Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.719039 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbf363f-faf3-45da-a19f-54cc0119825c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24\\\",\\\"image\\\":\\\"quay.io/
crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:44:25Z\\\",\\\"message\\\":\\\"W1125 09:44:15.399269 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:44:15.400911 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063855 cert, and key in /tmp/serving-cert-2102574939/serving-signer.crt, /tmp/serving-cert-2102574939/serving-signer.key\\\\nI1125 09:44:15.634430 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:44:15.637907 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:44:15.638219 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:44:15.639555 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2102574939/tls.crt::/tmp/serving-cert-2102574939/tls.key\\\\\\\"\\\\nF1125 09:44:25.945399 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:49Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.731048 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.731078 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.731088 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.731104 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.731114 4769 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:49Z","lastTransitionTime":"2025-11-25T09:44:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.734581 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89fbb83e7cad0ae1f970669d2cdb9bf9535d5b8e5ccec1962faa377fe263cc17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddfc773720803f36e40054cc984d270f6ff699d660c66491adb65338a52e00e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:49Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.834108 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.834160 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.834174 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.834200 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.834215 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:49Z","lastTransitionTime":"2025-11-25T09:44:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.938077 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.938129 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.938144 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.938163 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:49 crc kubenswrapper[4769]: I1125 09:44:49.938176 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:49Z","lastTransitionTime":"2025-11-25T09:44:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.041689 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.041733 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.041745 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.041763 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.041773 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:50Z","lastTransitionTime":"2025-11-25T09:44:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.145615 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.145653 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.145664 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.145680 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.145692 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:50Z","lastTransitionTime":"2025-11-25T09:44:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.248675 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.248740 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.248754 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.248777 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.248791 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:50Z","lastTransitionTime":"2025-11-25T09:44:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.352783 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.352847 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.352864 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.352889 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.352908 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:50Z","lastTransitionTime":"2025-11-25T09:44:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.455474 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.455536 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.455554 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.455581 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.455601 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:50Z","lastTransitionTime":"2025-11-25T09:44:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.531168 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-kfvzs_069c06c6-fe60-41d0-b96d-86606f55b258/ovnkube-controller/0.log" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.534171 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" event={"ID":"069c06c6-fe60-41d0-b96d-86606f55b258","Type":"ContainerStarted","Data":"5411a17d2e0213f4117bc5ada7510b454022b5daa6adecdefdeb5540d0ab97b9"} Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.534813 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.546007 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvkdt"] Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.546607 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvkdt" Nov 25 09:44:50 crc kubenswrapper[4769]: W1125 09:44:50.553447 4769 reflector.go:561] object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd": failed to list *v1.Secret: secrets "ovn-kubernetes-control-plane-dockercfg-gs7dd" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-ovn-kubernetes": no relationship found between node 'crc' and this object Nov 25 09:44:50 crc kubenswrapper[4769]: E1125 09:44:50.553514 4769 reflector.go:158] "Unhandled Error" err="object-\"openshift-ovn-kubernetes\"/\"ovn-kubernetes-control-plane-dockercfg-gs7dd\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"ovn-kubernetes-control-plane-dockercfg-gs7dd\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-ovn-kubernetes\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 09:44:50 crc kubenswrapper[4769]: W1125 09:44:50.553586 4769 reflector.go:561] object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert": failed to list *v1.Secret: secrets "ovn-control-plane-metrics-cert" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-ovn-kubernetes": no relationship found between node 'crc' and this object Nov 25 09:44:50 crc kubenswrapper[4769]: E1125 09:44:50.553663 4769 reflector.go:158] "Unhandled Error" err="object-\"openshift-ovn-kubernetes\"/\"ovn-control-plane-metrics-cert\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"ovn-control-plane-metrics-cert\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-ovn-kubernetes\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.556686 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e477c71aa0332cf903853803df3a7cfda7c251a749b3f3d9226e0b6edbd34a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.558926 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.558997 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.559009 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.559031 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.559061 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:50Z","lastTransitionTime":"2025-11-25T09:44:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.561159 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/37fe6f3a-f80b-4d79-9825-8f1c67c64d5c-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-wvkdt\" (UID: \"37fe6f3a-f80b-4d79-9825-8f1c67c64d5c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvkdt" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.561336 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j6llt\" (UniqueName: \"kubernetes.io/projected/37fe6f3a-f80b-4d79-9825-8f1c67c64d5c-kube-api-access-j6llt\") pod \"ovnkube-control-plane-749d76644c-wvkdt\" (UID: \"37fe6f3a-f80b-4d79-9825-8f1c67c64d5c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvkdt" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.561418 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/37fe6f3a-f80b-4d79-9825-8f1c67c64d5c-env-overrides\") pod \"ovnkube-control-plane-749d76644c-wvkdt\" (UID: \"37fe6f3a-f80b-4d79-9825-8f1c67c64d5c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvkdt" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.561547 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/37fe6f3a-f80b-4d79-9825-8f1c67c64d5c-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-wvkdt\" (UID: \"37fe6f3a-f80b-4d79-9825-8f1c67c64d5c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvkdt" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.586436 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.601059 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.611070 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-v24zk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a18d0f06-2fff-4e1a-9b11-01eaea85baa1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://181a23a91023308fd2b334e8113f5e1da7b17d63f0031f4219ebd74213e936ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8gdv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-v24zk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.625065 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.636191 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2bfc42c354557e335b5586b8aae6051c137143fdd2419a04802375587244df0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.646378 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kzpxc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb3a206d-ee72-415c-bc23-2e1b2d6f8592\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f88b14a3ffb863171ab8bbb4eb89877dfd711fadb03f45a6a337540d34c0a7d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv2k5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:37Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kzpxc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.661637 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.661712 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.661724 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.661762 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.661777 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:50Z","lastTransitionTime":"2025-11-25T09:44:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.661928 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27dbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46575153-4800-4ed2-8aa3-b66b98a9c899\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ed3aea46c79a1ec2872cceee652c70923be3656e45414fecb60f293639c81c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://42278a63
f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/e
ntrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27dbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.662478 4769 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/37fe6f3a-f80b-4d79-9825-8f1c67c64d5c-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-wvkdt\" (UID: \"37fe6f3a-f80b-4d79-9825-8f1c67c64d5c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvkdt" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.662511 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j6llt\" (UniqueName: \"kubernetes.io/projected/37fe6f3a-f80b-4d79-9825-8f1c67c64d5c-kube-api-access-j6llt\") pod \"ovnkube-control-plane-749d76644c-wvkdt\" (UID: \"37fe6f3a-f80b-4d79-9825-8f1c67c64d5c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvkdt" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.663189 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/37fe6f3a-f80b-4d79-9825-8f1c67c64d5c-env-overrides\") pod \"ovnkube-control-plane-749d76644c-wvkdt\" (UID: \"37fe6f3a-f80b-4d79-9825-8f1c67c64d5c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvkdt" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.663265 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/37fe6f3a-f80b-4d79-9825-8f1c67c64d5c-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-wvkdt\" (UID: \"37fe6f3a-f80b-4d79-9825-8f1c67c64d5c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvkdt" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.663461 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/37fe6f3a-f80b-4d79-9825-8f1c67c64d5c-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-wvkdt\" (UID: \"37fe6f3a-f80b-4d79-9825-8f1c67c64d5c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvkdt" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.663842 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/37fe6f3a-f80b-4d79-9825-8f1c67c64d5c-env-overrides\") pod \"ovnkube-control-plane-749d76644c-wvkdt\" (UID: \"37fe6f3a-f80b-4d79-9825-8f1c67c64d5c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvkdt" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.680924 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"069c06c6-fe60-41d0-b96d-86606f55b258\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82259eae43bbd7b738e4f7fef449b797efc4d557417eb01b61a9ff1e6d59d07b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41ce53b7a0ecd75b9205774e514ba01c6852f939cff8e96fa49105f4e600702\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0893245e50828fc44a5a9f575584f675dfd48a67e31fb78734b575e30af769ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79aa5dc74b62d0d36d744a8603484ce1a5199bba790a406ffa70f76b70592e51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7459078ef54cd1bb3171908596cdc66735a96463eafbc5a0ca21e644cd934e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab25f2ae721ae8616e5ef712256055c8f8a353d898a7f2711938634d2d9fbbe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5411a17d2e0213f4117bc5ada7510b454022b5daa6adecdefdeb5540d0ab97b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a0c37bc8a4086c4243e9cbb3dae0806cacc52cf8300fbdde90a59e01f5b7cc9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:44:48Z\\\",\\\"message\\\":\\\"11] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 09:44:48.625906 6069 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 09:44:48.626044 6069 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 09:44:48.626215 6069 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 09:44:48.626415 6069 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 09:44:48.626665 6069 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:44:48.627039 6069 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:44:48.627448 6069 reflector.go:311] Stopping reflector *v1.Namespace (0s) from 
k8s.io/client-go/informers/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:44Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://010a6e6fa299b12043210d61b4ac16d7d1f4d948888065c8a9e723060e2be2b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initConta
inerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-kfvzs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.688064 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j6llt\" (UniqueName: \"kubernetes.io/projected/37fe6f3a-f80b-4d79-9825-8f1c67c64d5c-kube-api-access-j6llt\") pod \"ovnkube-control-plane-749d76644c-wvkdt\" (UID: \"37fe6f3a-f80b-4d79-9825-8f1c67c64d5c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvkdt" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.696605 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbf363f-faf3-45da-a19f-54cc0119825c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:44:25Z\\\",\\\"message\\\":\\\"W1125 09:44:15.399269 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:44:15.400911 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063855 cert, and key in /tmp/serving-cert-2102574939/serving-signer.crt, /tmp/serving-cert-2102574939/serving-signer.key\\\\nI1125 09:44:15.634430 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:44:15.637907 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:44:15.638219 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:44:15.639555 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2102574939/tls.crt::/tmp/serving-cert-2102574939/tls.key\\\\\\\"\\\\nF1125 09:44:25.945399 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.710709 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89fbb83e7cad0ae1f970669d2cdb9bf9535d5b8e5ccec1962faa377fe263cc17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddfc773720803f36e40054cc984d270f6ff699d660c66491adb65338a52e00e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/
ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.722703 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c58a9381-bd89-4fd9-8217-f04646879968\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cd58c1df5cc30ce98fc70b6117deafa5ebeb2d1a7d7b481b62c38ab39c9290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8b504d3d81cc7b0e4741363b8a9e4c0ea3a0c1af959c8b3def222b44a646b22b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-m
anager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fad541e8dbab3802d7acafd3a70a54f992d4578319dc441c41ffdffdd635d63\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.734942 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d58c71b5-5dc4-45c1-9b58-9740a35d2256\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdf114b30bdda719b3e55767f9df9f9e8e3e3e9238fa8403a388d7892922563e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a47750ed21c017ef2cf17c7e8ce0676b8bd6a9fe2669c3d5adae247e557ea71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-98mzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.750768 4769 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-s47tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"025219f0-bc69-4a33-acaa-b055607272bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8283067913fddd4b3d58c8dc0cc7000d482d929bc6a1d4fb501bfa8c0fa9525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7rjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-s47tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.764393 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e477c71aa0332cf903853803df3a7cfda7c251a749b3f3d9226e0b6edbd34a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.765015 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.765060 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.765077 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.765105 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.765123 4769 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:50Z","lastTransitionTime":"2025-11-25T09:44:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.775865 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.789148 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvkdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37fe6f3a-f80b-4d79-9825-8f1c67c64d5c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j6llt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j6llt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wvkdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.803405 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.818873 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.838299 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-v24zk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a18d0f06-2fff-4e1a-9b11-01eaea85baa1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://181a23a91023308fd2b334e8113f5e1da7b17d63f0031f4219ebd74213e936ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8gdv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-v24zk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.857393 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbf363f-faf3-45da-a19f-54cc0119825c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-
resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:44:25Z\\\",\\\"message\\\":\\\"W1125 09:44:15.399269 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:44:15.400911 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063855 cert, and key in /tmp/serving-cert-2102574939/serving-signer.crt, /tmp/serving-cert-2102574939/serving-signer.key\\\\nI1125 09:44:15.634430 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:44:15.637907 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:44:15.638219 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:44:15.639555 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2102574939/tls.crt::/tmp/serving-cert-2102574939/tls.key\\\\\\\"\\\\nF1125 09:44:25.945399 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.867935 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.868013 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.868028 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.868051 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.868065 4769 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:50Z","lastTransitionTime":"2025-11-25T09:44:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.886116 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89fbb83e7cad0ae1f970669d2cdb9bf9535d5b8e5ccec1962faa377fe263cc17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddfc773720803f36e40054cc984d270f6ff699d660c66491adb65338a52e00e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.912217 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2bfc42c354557e335b5586b8aae6051c137143fdd2419a04802375587244df0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.925559 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kzpxc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb3a206d-ee72-415c-bc23-2e1b2d6f8592\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f88b14a3ffb863171ab8bbb4eb89877dfd711fadb03f45a6a337540d34c0a7d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv2k5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:37Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kzpxc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.941382 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27dbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46575153-4800-4ed2-8aa3-b66b98a9c899\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ed3aea46c79a1ec2872cceee652c70923be3656e45414fecb60f293639c81c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27dbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.962680 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"069c06c6-fe60-41d0-b96d-86606f55b258\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82259eae43bbd7b738e4f7fef449b797efc4d557417eb01b61a9ff1e6d59d07b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41ce53b7a0ecd75b9205774e514ba01c6852f939cff8e96fa49105f4e600702\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0893245e50828fc44a5a9f575584f675dfd48a67e31fb78734b575e30af769ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79aa5dc74b62d0d36d744a8603484ce1a5199bba790a406ffa70f76b70592e51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7459078ef54cd1bb3171908596cdc66735a96463eafbc5a0ca21e644cd934e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab25f2ae721ae8616e5ef712256055c8f8a353d898a7f2711938634d2d9fbbe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5411a17d2e0213f4117bc5ada7510b454022b5daa6adecdefdeb5540d0ab97b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a0c37bc8a4086c4243e9cbb3dae0806cacc52cf8300fbdde90a59e01f5b7cc9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:44:48Z\\\",\\\"message\\\":\\\"11] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 09:44:48.625906 6069 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 09:44:48.626044 6069 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 09:44:48.626215 6069 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 09:44:48.626415 6069 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 09:44:48.626665 6069 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:44:48.627039 6069 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:44:48.627448 6069 reflector.go:311] Stopping reflector *v1.Namespace (0s) from 
k8s.io/client-go/informers/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:44Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://010a6e6fa299b12043210d61b4ac16d7d1f4d948888065c8a9e723060e2be2b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initConta
inerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-kfvzs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.970521 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.970587 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.970633 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.970659 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.970676 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:50Z","lastTransitionTime":"2025-11-25T09:44:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.981732 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c58a9381-bd89-4fd9-8217-f04646879968\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cd58c1df5cc30ce98fc70b6117deafa5ebeb2d1a7d7b481b62c38ab39c9290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8b504d3d81cc7b0e4741363b8a9e4c0ea3a0c1af959c8b3def222b44a646b22b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fad541e8dbab3802d7acafd3a70a54f992d4578319dc441c41ffdffdd635d63\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:50 crc kubenswrapper[4769]: I1125 09:44:50.993494 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d58c71b5-5dc4-45c1-9b58-9740a35d2256\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdf114b30bdda719b3e55767f9df9f9e8e3e3e9238fa8403a388d7892922563e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a47750ed21c017ef2cf17c7e8ce0676b8bd6a9fe2669c3d5adae247e557ea71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-98mzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.008709 4769 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-s47tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"025219f0-bc69-4a33-acaa-b055607272bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8283067913fddd4b3d58c8dc0cc7000d482d929bc6a1d4fb501bfa8c0fa9525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7rjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-s47tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:51Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.073544 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.073599 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.073612 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.073629 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.073641 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:51Z","lastTransitionTime":"2025-11-25T09:44:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.176640 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.176690 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.176701 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.176719 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.176731 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:51Z","lastTransitionTime":"2025-11-25T09:44:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.235812 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.236066 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:44:51 crc kubenswrapper[4769]: E1125 09:44:51.236096 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.236137 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:44:51 crc kubenswrapper[4769]: E1125 09:44:51.236302 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:44:51 crc kubenswrapper[4769]: E1125 09:44:51.236413 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.280058 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.280168 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.280190 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.280223 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.280237 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:51Z","lastTransitionTime":"2025-11-25T09:44:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.386464 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.386572 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.386601 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.386640 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.386722 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:51Z","lastTransitionTime":"2025-11-25T09:44:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.492309 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.492400 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.492433 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.492466 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.492488 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:51Z","lastTransitionTime":"2025-11-25T09:44:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.541418 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-kfvzs_069c06c6-fe60-41d0-b96d-86606f55b258/ovnkube-controller/1.log" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.542733 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-kfvzs_069c06c6-fe60-41d0-b96d-86606f55b258/ovnkube-controller/0.log" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.546375 4769 generic.go:334] "Generic (PLEG): container finished" podID="069c06c6-fe60-41d0-b96d-86606f55b258" containerID="5411a17d2e0213f4117bc5ada7510b454022b5daa6adecdefdeb5540d0ab97b9" exitCode=1 Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.546449 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" event={"ID":"069c06c6-fe60-41d0-b96d-86606f55b258","Type":"ContainerDied","Data":"5411a17d2e0213f4117bc5ada7510b454022b5daa6adecdefdeb5540d0ab97b9"} Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.546542 4769 scope.go:117] "RemoveContainer" containerID="7a0c37bc8a4086c4243e9cbb3dae0806cacc52cf8300fbdde90a59e01f5b7cc9" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.547744 4769 scope.go:117] "RemoveContainer" containerID="5411a17d2e0213f4117bc5ada7510b454022b5daa6adecdefdeb5540d0ab97b9" Nov 25 09:44:51 crc kubenswrapper[4769]: E1125 09:44:51.548129 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-kfvzs_openshift-ovn-kubernetes(069c06c6-fe60-41d0-b96d-86606f55b258)\"" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.568588 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c58a9381-bd89-4fd9-8217-f04646879968\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cd58c1df5cc30ce98fc70b6117deafa5ebeb2d1a7d7b481b62c38ab39c9290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8b504d3d81cc7b0e4741363b8a9e4c0ea3a0c1af959c8b3def222b44a646b22b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fad541e8dbab3802d7acafd3a70a54f992d4578319dc441c41ffdffdd635d63\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:51Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.587989 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d58c71b5-5dc4-45c1-9b58-9740a35d2256\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdf114b30bdda719b3e55767f9df9f9e8e3e3e9238fa8403a388d7892922563e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a47750ed21c017ef2
cf17c7e8ce0676b8bd6a9fe2669c3d5adae247e557ea71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-98mzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:51Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.595020 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.595072 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.595082 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.595108 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.595119 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:51Z","lastTransitionTime":"2025-11-25T09:44:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.607853 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-s47tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"025219f0-bc69-4a33-acaa-b055607272bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8283067913fddd4b3d58c8dc0cc7000d482d929bc6a1d4fb501bfa8c0fa9525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7rjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-s47tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:51Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.620933 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.624263 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e477c71aa0332cf903853803df3a7cfda7c251a749b3f3d9226e0b6edbd34a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:51Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.628156 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.639407 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/37fe6f3a-f80b-4d79-9825-8f1c67c64d5c-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-wvkdt\" (UID: 
\"37fe6f3a-f80b-4d79-9825-8f1c67c64d5c\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvkdt" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.640690 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:51Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.653762 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvkdt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37fe6f3a-f80b-4d79-9825-8f1c67c64d5c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j6llt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j6llt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wvkdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:51Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.668248 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed 
to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:51Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.687167 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:51Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.697403 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.697447 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.697458 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.697477 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.697487 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:51Z","lastTransitionTime":"2025-11-25T09:44:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.698953 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-v24zk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a18d0f06-2fff-4e1a-9b11-01eaea85baa1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://181a23a91023308fd2b334e8113f5e1da7b17d63f0031f4219ebd74213e936ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8gdv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-v24zk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:51Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.717950 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"069c06c6-fe60-41d0-b96d-86606f55b258\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82259eae43bbd7b738e4f7fef449b797efc4d557417eb01b61a9ff1e6d59d07b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41ce53b7a0ecd75b9205774e514ba01c6852f939cff8e96fa49105f4e600702\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0893245e50828fc44a5a9f575584f675dfd48a67e31fb78734b575e30af769ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79aa5dc74b62d0d36d744a8603484ce1a5199bba790a406ffa70f76b70592e51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7459078ef54cd1bb3171908596cdc66735a96463eafbc5a0ca21e644cd934e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab25f2ae721ae8616e5ef712256055c8f8a353d898a7f2711938634d2d9fbbe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5411a17d2e0213f4117bc5ada7510b454022b5daa6adecdefdeb5540d0ab97b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a0c37bc8a4086c4243e9cbb3dae0806cacc52cf8300fbdde90a59e01f5b7cc9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:44:48Z\\\",\\\"message\\\":\\\"11] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 09:44:48.625906 6069 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 09:44:48.626044 6069 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 09:44:48.626215 6069 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 09:44:48.626415 6069 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 09:44:48.626665 6069 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:44:48.627039 6069 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:44:48.627448 6069 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:44Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5411a17d2e0213f4117bc5ada7510b454022b5daa6adecdefdeb5540d0ab97b9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:44:51Z\\\",\\\"message\\\":\\\"moved *v1.Namespace event handler 1\\\\nI1125 09:44:50.984553 6229 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 09:44:50.984560 6229 handler.go:208] Removed *v1.Pod 
event handler 6\\\\nI1125 09:44:50.984879 6229 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 09:44:50.984929 6229 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 09:44:50.984942 6229 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 09:44:50.984991 6229 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 09:44:50.985033 6229 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 09:44:50.985064 6229 factory.go:656] Stopping watch factory\\\\nI1125 09:44:50.985082 6229 ovnkube.go:599] Stopped ovnkube\\\\nI1125 09:44:50.985119 6229 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 09:44:50.985146 6229 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 09:44:50.985159 6229 handler.go:208] Removed *v1.Node event handler 7\\\\nI1125 09:44:50.985166 6229 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 09:44:50.985175 6229 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 09:44:50.985188 6229 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 09:44:50.985297 6229 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://010a6e6fa299b12043210d61b4ac16d7d1f4d948888065c8a9e723060e2be2b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageI
D\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-kfvzs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:51Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.730531 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbf363f-faf3-45da-a19f-54cc0119825c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:44:25Z\\\",\\\"message\\\":\\\"W1125 09:44:15.399269 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:44:15.400911 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063855 cert, and key in /tmp/serving-cert-2102574939/serving-signer.crt, /tmp/serving-cert-2102574939/serving-signer.key\\\\nI1125 09:44:15.634430 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:44:15.637907 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:44:15.638219 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:44:15.639555 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2102574939/tls.crt::/tmp/serving-cert-2102574939/tls.key\\\\\\\"\\\\nF1125 09:44:25.945399 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:51Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.743136 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89fbb83e7cad0ae1f970669d2cdb9bf9535d5b8e5ccec1962faa377fe263cc17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddfc773720803f36e40054cc984d270f6ff699d660c66491adb65338a52e00e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/
ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:51Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.755497 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2bfc42c354557e335b5586b8aae6051c137143fdd2419a04802375587244df0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:51Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.764917 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kzpxc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb3a206d-ee72-415c-bc23-2e1b2d6f8592\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f88b14a3ffb863171ab8bbb4eb89877dfd711fadb03f45a6a337540d34c0a7d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv2k5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:37Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kzpxc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:51Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.767772 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvkdt" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.782241 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27dbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46575153-4800-4ed2-8aa3-b66b98a9c899\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ed3aea46c79a1ec2872cceee652c70923be3656e45414fecb60f293639c81c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://42278a6
3f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/
entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27dbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:51Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:51 crc kubenswrapper[4769]: W1125 09:44:51.782875 4769 
manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37fe6f3a_f80b_4d79_9825_8f1c67c64d5c.slice/crio-c8df139e04f09c3a77582f4fdb98b7baf18f6308c40e2d76b173bebc248be966 WatchSource:0}: Error finding container c8df139e04f09c3a77582f4fdb98b7baf18f6308c40e2d76b173bebc248be966: Status 404 returned error can't find the container with id c8df139e04f09c3a77582f4fdb98b7baf18f6308c40e2d76b173bebc248be966 Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.800330 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.800380 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.800395 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.800416 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.800429 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:51Z","lastTransitionTime":"2025-11-25T09:44:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.904418 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.904483 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.904496 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.904524 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:51 crc kubenswrapper[4769]: I1125 09:44:51.904537 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:51Z","lastTransitionTime":"2025-11-25T09:44:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.008020 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.008107 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.008123 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.008172 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.008191 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:52Z","lastTransitionTime":"2025-11-25T09:44:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.024181 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-7khh9"] Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.024995 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:44:52 crc kubenswrapper[4769]: E1125 09:44:52.025097 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.043076 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e477c71aa0332cf903853803df3a7cfda7c251a749b3f3d9226e0b6edbd34a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.057415 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.076576 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l6phx\" (UniqueName: \"kubernetes.io/projected/76770e00-0d61-45ae-9772-1e8c42dc6ea6-kube-api-access-l6phx\") pod \"network-metrics-daemon-7khh9\" (UID: \"76770e00-0d61-45ae-9772-1e8c42dc6ea6\") " pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.076627 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/76770e00-0d61-45ae-9772-1e8c42dc6ea6-metrics-certs\") pod \"network-metrics-daemon-7khh9\" (UID: \"76770e00-0d61-45ae-9772-1e8c42dc6ea6\") " pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.076737 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvkdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37fe6f3a-f80b-4d79-9825-8f1c67c64d5c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j6llt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j6llt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wvkdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.092937 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.105469 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-v24zk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a18d0f06-2fff-4e1a-9b11-01eaea85baa1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://181a23a91023308fd2b334e8113f5e1da7b17d63f0031f4219ebd74213e936ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8gdv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126
.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-v24zk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.110830 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.111066 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.111139 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.111218 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.111304 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:52Z","lastTransitionTime":"2025-11-25T09:44:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.115692 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-7khh9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"76770e00-0d61-45ae-9772-1e8c42dc6ea6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6phx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6phx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:52Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-7khh9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.128326 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.144153 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2bfc42c354557e335b5586b8aae6051c137143fdd2419a04802375587244df0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.155124 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kzpxc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb3a206d-ee72-415c-bc23-2e1b2d6f8592\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f88b14a3ffb863171ab8bbb4eb89877dfd711fadb03f45a6a337540d34c0a7d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv2k5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:37Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kzpxc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.177407 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l6phx\" (UniqueName: \"kubernetes.io/projected/76770e00-0d61-45ae-9772-1e8c42dc6ea6-kube-api-access-l6phx\") pod \"network-metrics-daemon-7khh9\" (UID: \"76770e00-0d61-45ae-9772-1e8c42dc6ea6\") " pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.177466 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/76770e00-0d61-45ae-9772-1e8c42dc6ea6-metrics-certs\") pod \"network-metrics-daemon-7khh9\" (UID: \"76770e00-0d61-45ae-9772-1e8c42dc6ea6\") " pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:44:52 crc kubenswrapper[4769]: E1125 09:44:52.177626 4769 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:44:52 crc kubenswrapper[4769]: E1125 09:44:52.177706 4769 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/76770e00-0d61-45ae-9772-1e8c42dc6ea6-metrics-certs podName:76770e00-0d61-45ae-9772-1e8c42dc6ea6 nodeName:}" failed. No retries permitted until 2025-11-25 09:44:52.677686738 +0000 UTC m=+41.262659051 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/76770e00-0d61-45ae-9772-1e8c42dc6ea6-metrics-certs") pod "network-metrics-daemon-7khh9" (UID: "76770e00-0d61-45ae-9772-1e8c42dc6ea6") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.179287 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27dbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46575153-4800-4ed2-8aa3-b66b98a9c899\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ed3aea46c79a1ec2872cceee652c70923be3656e45414fecb60f293639c81c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPat
h\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed
81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"star
tTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27dbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.201155 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l6phx\" (UniqueName: \"kubernetes.io/projected/76770e00-0d61-45ae-9772-1e8c42dc6ea6-kube-api-access-l6phx\") pod \"network-metrics-daemon-7khh9\" (UID: \"76770e00-0d61-45ae-9772-1e8c42dc6ea6\") " pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.206184 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"069c06c6-fe60-41d0-b96d-86606f55b258\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82259eae43bbd7b738e4f7fef449b797efc4d557417eb01b61a9ff1e6d59d07b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41ce53b7a0ecd75b9205774e514ba01c6852f939cff8e96fa49105f4e600702\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0893245e50828fc44a5a9f575584f675dfd48a67e31fb78734b575e30af769ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79aa5dc74b62d0d36d744a8603484ce1a5199bba790a406ffa70f76b70592e51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7459078ef54cd1bb3171908596cdc66735a96463eafbc5a0ca21e644cd934e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab25f2ae721ae8616e5ef712256055c8f8a353d898a7f2711938634d2d9fbbe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5411a17d2e0213f4117bc5ada7510b454022b5da
a6adecdefdeb5540d0ab97b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a0c37bc8a4086c4243e9cbb3dae0806cacc52cf8300fbdde90a59e01f5b7cc9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:44:48Z\\\",\\\"message\\\":\\\"11] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 09:44:48.625906 6069 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 09:44:48.626044 6069 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 09:44:48.626215 6069 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 09:44:48.626415 6069 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 09:44:48.626665 6069 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:44:48.627039 6069 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:44:48.627448 6069 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:44Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5411a17d2e0213f4117bc5ada7510b454022b5daa6adecdefdeb5540d0ab97b9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:44:51Z\\\",\\\"message\\\":\\\"moved *v1.Namespace event handler 1\\\\nI1125 09:44:50.984553 6229 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 09:44:50.984560 6229 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1125 09:44:50.984879 6229 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 09:44:50.984929 6229 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 09:44:50.984942 6229 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 09:44:50.984991 6229 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 09:44:50.985033 6229 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 09:44:50.985064 6229 factory.go:656] Stopping watch factory\\\\nI1125 09:44:50.985082 6229 ovnkube.go:599] Stopped ovnkube\\\\nI1125 09:44:50.985119 6229 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 09:44:50.985146 6229 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 09:44:50.985159 6229 handler.go:208] Removed *v1.Node event handler 7\\\\nI1125 09:44:50.985166 6229 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 09:44:50.985175 6229 handler.go:208] Removed *v1.EgressFirewall event 
handler 9\\\\nI1125 09:44:50.985188 6229 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 09:44:50.985297 6229 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://010a6e6fa299b12043210d61b4ac16d7d1f4d948888065c8a9e723060e2be2b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-
o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-kfvzs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.214051 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.214103 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.214117 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.214137 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.214149 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:52Z","lastTransitionTime":"2025-11-25T09:44:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.225033 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbf363f-faf3-45da-a19f-54cc0119825c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:44:25Z\\\",\\\"message\\\":\\\"W1125 09:44:15.399269 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:44:15.400911 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063855 cert, and key in /tmp/serving-cert-2102574939/serving-signer.crt, /tmp/serving-cert-2102574939/serving-signer.key\\\\nI1125 09:44:15.634430 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:44:15.637907 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:44:15.638219 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:44:15.639555 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2102574939/tls.crt::/tmp/serving-cert-2102574939/tls.key\\\\\\\"\\\\nF1125 09:44:25.945399 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.245804 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89fbb83e7cad0ae1f970669d2cdb9bf9535d5b8e5ccec1962faa377fe263cc17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddfc773720803f36e40054cc984d270f6ff699d660c66491adb65338a52e00e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.261288 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c58a9381-bd89-4fd9-8217-f04646879968\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cd58c1df5cc30ce98fc70b6117deafa5ebeb2d1a7d7b481b62c38ab39c9290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8b504d3d81cc7b0e4741363b8a9e4c0ea3a0c1af959c8b3def222b44a646b22b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fad541e8dbab3802d7acafd3a70a54f992d4578319dc441c41ffdffdd635d63\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.276442 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d58c71b5-5dc4-45c1-9b58-9740a35d2256\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdf114b30bdda719b3e55767f9df9f9e8e3e3e9238fa8403a388d7892922563e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a47750ed21c017ef2
cf17c7e8ce0676b8bd6a9fe2669c3d5adae247e557ea71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-98mzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.289488 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-s47tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"025219f0-bc69-4a33-acaa-b055607272bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8283067913fddd4b3d58c8dc0cc7000d482d929bc6a1d4fb501bfa8c0fa9525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni
-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7rjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-s47tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.305516 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c58a9381-bd89-4fd9-8217-f04646879968\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cd58c1df5cc30ce98fc70b6117deafa5ebeb2d1a7d7b481b62c38ab39c9290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8b504d3d81cc7b0e4741363b8a9e4c0ea3a0c1af959c8b3def222b44a646b22b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fad541e8dbab3802d7acafd3a70a54f992d4578319dc441c41ffdffdd635d63\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.317287 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.317347 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.317364 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.317389 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.317403 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:52Z","lastTransitionTime":"2025-11-25T09:44:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.324098 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d58c71b5-5dc4-45c1-9b58-9740a35d2256\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdf114b30bdda719b3e55767f9df9f9e8e3e3e9238fa8403a388d7892922563e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a47750ed21c017ef2cf17c7e8ce0676b8bd6a9fe2669c3d5adae247e557ea71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-98mzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.340446 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-s47tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"025219f0-bc69-4a33-acaa-b055607272bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8283067913fddd4b3d58c8dc0cc7000d482d929bc6a1d4fb501bfa8c0fa9525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7rjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-s47tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.354765 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e477c71aa0332cf903853803df3a7cfda7c251a749b3f3d9226e0b6edbd34a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.371119 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.385260 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvkdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37fe6f3a-f80b-4d79-9825-8f1c67c64d5c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j6llt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j6llt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wvkdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.403695 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.420022 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.420077 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.420092 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.420112 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.420123 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:52Z","lastTransitionTime":"2025-11-25T09:44:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.420890 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.434073 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-v24zk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a18d0f06-2fff-4e1a-9b11-01eaea85baa1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://181a23a91023308fd2b334e8113f5e1da7b17d63f0031f4219ebd74213e936ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8gdv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-v24zk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.447990 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-7khh9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"76770e00-0d61-45ae-9772-1e8c42dc6ea6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6phx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6phx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:52Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-7khh9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.469614 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"069c06c6-fe60-41d0-b96d-86606f55b258\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82259eae43bbd7b738e4f7fef449b797efc4d557417eb01b61a9ff1e6d59d07b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41ce53b7a0ecd75b9205774e514ba01c6852f939cff8e96fa49105f4e600702\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0893245e50828fc44a5a9f575584f675dfd48a67e31fb78734b575e30af769ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79aa5dc74b62d0d36d744a8603484ce1a5199bba790a406ffa70f76b70592e51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7459078ef54cd1bb3171908596cdc66735a96463eafbc5a0ca21e644cd934e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab25f2ae721ae8616e5ef712256055c8f8a353d898a7f2711938634d2d9fbbe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5411a17d2e0213f4117bc5ada7510b454022b5da
a6adecdefdeb5540d0ab97b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a0c37bc8a4086c4243e9cbb3dae0806cacc52cf8300fbdde90a59e01f5b7cc9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:44:48Z\\\",\\\"message\\\":\\\"11] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 09:44:48.625906 6069 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 09:44:48.626044 6069 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 09:44:48.626215 6069 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 09:44:48.626415 6069 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 09:44:48.626665 6069 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:44:48.627039 6069 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:44:48.627448 6069 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:44Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5411a17d2e0213f4117bc5ada7510b454022b5daa6adecdefdeb5540d0ab97b9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:44:51Z\\\",\\\"message\\\":\\\"moved *v1.Namespace event handler 1\\\\nI1125 09:44:50.984553 6229 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 09:44:50.984560 6229 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1125 09:44:50.984879 6229 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 09:44:50.984929 6229 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 09:44:50.984942 6229 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 09:44:50.984991 6229 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 09:44:50.985033 6229 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 09:44:50.985064 6229 factory.go:656] Stopping watch factory\\\\nI1125 09:44:50.985082 6229 ovnkube.go:599] Stopped ovnkube\\\\nI1125 09:44:50.985119 6229 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 09:44:50.985146 6229 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 09:44:50.985159 6229 handler.go:208] Removed *v1.Node event handler 7\\\\nI1125 09:44:50.985166 6229 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 09:44:50.985175 6229 handler.go:208] Removed *v1.EgressFirewall event 
handler 9\\\\nI1125 09:44:50.985188 6229 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 09:44:50.985297 6229 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://010a6e6fa299b12043210d61b4ac16d7d1f4d948888065c8a9e723060e2be2b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-
o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-kfvzs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.486389 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbf363f-faf3-45da-a19f-54cc0119825c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:44:25Z\\\",\\\"message\\\":\\\"W1125 09:44:15.399269 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:44:15.400911 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063855 cert, and key in /tmp/serving-cert-2102574939/serving-signer.crt, /tmp/serving-cert-2102574939/serving-signer.key\\\\nI1125 09:44:15.634430 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:44:15.637907 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:44:15.638219 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:44:15.639555 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2102574939/tls.crt::/tmp/serving-cert-2102574939/tls.key\\\\\\\"\\\\nF1125 09:44:25.945399 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.499676 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89fbb83e7cad0ae1f970669d2cdb9bf9535d5b8e5ccec1962faa377fe263cc17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddfc773720803f36e40054cc984d270f6ff699d660c66491adb65338a52e00e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/
ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.512222 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2bfc42c354557e335b5586b8aae6051c137143fdd2419a04802375587244df0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.523786 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.523820 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.523832 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.523852 4769 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.523863 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:52Z","lastTransitionTime":"2025-11-25T09:44:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.523824 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kzpxc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb3a206d-ee72-415c-bc23-2e1b2d6f8592\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f88b14a3ffb863171ab8bbb4eb89877dfd711fadb03f45a6a337540d34c0a7d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv2k5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:37Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kzpxc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.544698 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27dbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46575153-4800-4ed2-8aa3-b66b98a9c899\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ed3aea46c79a1ec2872cceee652c70923be3656e45414fecb60f293639c81c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27dbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.552705 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvkdt" event={"ID":"37fe6f3a-f80b-4d79-9825-8f1c67c64d5c","Type":"ContainerStarted","Data":"7a1b88c3a27560378821e084a6e12a5cdb549acdd447f54ea696a79f8914baad"} Nov 25 09:44:52 crc kubenswrapper[4769]: 
I1125 09:44:52.552770 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvkdt" event={"ID":"37fe6f3a-f80b-4d79-9825-8f1c67c64d5c","Type":"ContainerStarted","Data":"d18ad273888cf05de2af3469630aaf22e7e5313ab57628e0a68add8d24dfeabd"} Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.552839 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvkdt" event={"ID":"37fe6f3a-f80b-4d79-9825-8f1c67c64d5c","Type":"ContainerStarted","Data":"c8df139e04f09c3a77582f4fdb98b7baf18f6308c40e2d76b173bebc248be966"} Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.554482 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-kfvzs_069c06c6-fe60-41d0-b96d-86606f55b258/ovnkube-controller/1.log" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.557908 4769 scope.go:117] "RemoveContainer" containerID="5411a17d2e0213f4117bc5ada7510b454022b5daa6adecdefdeb5540d0ab97b9" Nov 25 09:44:52 crc kubenswrapper[4769]: E1125 09:44:52.558164 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-kfvzs_openshift-ovn-kubernetes(069c06c6-fe60-41d0-b96d-86606f55b258)\"" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.567522 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.582135 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.595537 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-v24zk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a18d0f06-2fff-4e1a-9b11-01eaea85baa1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://181a23a91023308fd2b334e8113f5e1da7b17d63f0031f4219ebd74213e936ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8gdv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-v24zk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.606767 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-7khh9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"76770e00-0d61-45ae-9772-1e8c42dc6ea6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6phx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6phx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:52Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-7khh9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.620869 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbf363f-faf3-45da-a19f-54cc0119825c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-
cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:44:25Z\\\",\\\"message\\\":\\\"W1125 09:44:15.399269 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:44:15.400911 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063855 cert, and key in /tmp/serving-cert-2102574939/serving-signer.crt, /tmp/serving-cert-2102574939/serving-signer.key\\\\nI1125 09:44:15.634430 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:44:15.637907 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:44:15.638219 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:44:15.639555 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2102574939/tls.crt::/tmp/serving-cert-2102574939/tls.key\\\\\\\"\\\\nF1125 09:44:25.945399 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.626236 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.626299 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.626316 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.626340 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.626357 4769 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:52Z","lastTransitionTime":"2025-11-25T09:44:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.633998 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89fbb83e7cad0ae1f970669d2cdb9bf9535d5b8e5ccec1962faa377fe263cc17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddfc773720803f36e40054cc984d270f6ff699d660c66491adb65338a52e00e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.648421 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2bfc42c354557e335b5586b8aae6051c137143fdd2419a04802375587244df0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.663159 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kzpxc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb3a206d-ee72-415c-bc23-2e1b2d6f8592\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f88b14a3ffb863171ab8bbb4eb89877dfd711fadb03f45a6a337540d34c0a7d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv2k5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:37Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kzpxc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.681505 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27dbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46575153-4800-4ed2-8aa3-b66b98a9c899\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ed3aea46c79a1ec2872cceee652c70923be3656e45414fecb60f293639c81c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27dbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.682786 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/76770e00-0d61-45ae-9772-1e8c42dc6ea6-metrics-certs\") pod \"network-metrics-daemon-7khh9\" (UID: \"76770e00-0d61-45ae-9772-1e8c42dc6ea6\") " 
pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:44:52 crc kubenswrapper[4769]: E1125 09:44:52.683039 4769 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:44:52 crc kubenswrapper[4769]: E1125 09:44:52.683173 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/76770e00-0d61-45ae-9772-1e8c42dc6ea6-metrics-certs podName:76770e00-0d61-45ae-9772-1e8c42dc6ea6 nodeName:}" failed. No retries permitted until 2025-11-25 09:44:53.683155168 +0000 UTC m=+42.268127481 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/76770e00-0d61-45ae-9772-1e8c42dc6ea6-metrics-certs") pod "network-metrics-daemon-7khh9" (UID: "76770e00-0d61-45ae-9772-1e8c42dc6ea6") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.704679 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"069c06c6-fe60-41d0-b96d-86606f55b258\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82259eae43bbd7b738e4f7fef449b797efc4d557417eb01b61a9ff1e6d59d07b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41ce53b7a0ecd75b9205774e514ba01c6852f939cff8e96fa49105f4e600702\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0893245e50828fc44a5a9f575584f675dfd48a67e31fb78734b575e30af769ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79aa5dc74b62d0d36d744a8603484ce1a5199bba790a406ffa70f76b70592e51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7459078ef54cd1bb3171908596cdc66735a96463eafbc5a0ca21e644cd934e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab25f2ae721ae8616e5ef712256055c8f8a353d898a7f2711938634d2d9fbbe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5411a17d2e0213f4117bc5ada7510b454022b5da
a6adecdefdeb5540d0ab97b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7a0c37bc8a4086c4243e9cbb3dae0806cacc52cf8300fbdde90a59e01f5b7cc9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:44:48Z\\\",\\\"message\\\":\\\"11] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 09:44:48.625906 6069 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 09:44:48.626044 6069 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 09:44:48.626215 6069 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 09:44:48.626415 6069 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 09:44:48.626665 6069 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:44:48.627039 6069 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 09:44:48.627448 6069 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:44Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5411a17d2e0213f4117bc5ada7510b454022b5daa6adecdefdeb5540d0ab97b9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:44:51Z\\\",\\\"message\\\":\\\"moved *v1.Namespace event handler 1\\\\nI1125 09:44:50.984553 6229 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 09:44:50.984560 6229 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1125 09:44:50.984879 6229 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 09:44:50.984929 6229 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 09:44:50.984942 6229 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 09:44:50.984991 6229 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 09:44:50.985033 6229 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 09:44:50.985064 6229 factory.go:656] Stopping watch factory\\\\nI1125 09:44:50.985082 6229 ovnkube.go:599] Stopped ovnkube\\\\nI1125 09:44:50.985119 6229 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 09:44:50.985146 6229 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 09:44:50.985159 6229 handler.go:208] Removed *v1.Node event handler 7\\\\nI1125 09:44:50.985166 6229 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 09:44:50.985175 6229 handler.go:208] Removed *v1.EgressFirewall event 
handler 9\\\\nI1125 09:44:50.985188 6229 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 09:44:50.985297 6229 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://010a6e6fa299b12043210d61b4ac16d7d1f4d948888065c8a9e723060e2be2b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-
o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-kfvzs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.718529 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c58a9381-bd89-4fd9-8217-f04646879968\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cd58c1df5cc30ce98fc70b6117deafa5ebeb2d1a7d7b481b62c38ab39c9290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8b504d3d81cc7b0e4741363b8a9e4c0ea3a0c1af959c8b3def222b44a646b22b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fad541e8dbab3802d7acafd3a70a54f992d4578319dc441c41ffdffdd635d63\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.729072 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.729124 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.729181 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.729202 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.729213 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:52Z","lastTransitionTime":"2025-11-25T09:44:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.732558 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d58c71b5-5dc4-45c1-9b58-9740a35d2256\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdf114b30bdda719b3e55767f9df9f9e8e3e3e9238fa8403a388d7892922563e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a47750ed21c017ef2cf17c7e8ce0676b8bd6a9fe2669c3d5adae247e557ea71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-98mzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.746235 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-s47tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"025219f0-bc69-4a33-acaa-b055607272bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8283067913fddd4b3d58c8dc0cc7000d482d929bc6a1d4fb501bfa8c0fa9525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7rjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-s47tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.761359 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e477c71aa0332cf903853803df3a7cfda7c251a749b3f3d9226e0b6edbd34a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.775333 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.790484 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvkdt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37fe6f3a-f80b-4d79-9825-8f1c67c64d5c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d18ad273888cf05de2af3469630aaf22e7e5313ab57628e0a68add8d24dfeabd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j6llt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a1b88c3a27560378821e084a6e12a5cdb549acdd447f54ea696a79f8914baad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j6llt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wvkdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 
09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.806286 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d58c71b5-5dc4-45c1-9b58-9740a35d2256\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdf114b30bdda719b3e55767f9df9f9e8e3e3e9238fa8403a388d7892922563e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a47750ed21c017ef2cf17c7e8ce0676b8bd6a9fe2669c3d5adae247e557ea71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-98mzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.822693 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-s47tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"025219f0-bc69-4a33-acaa-b055607272bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8283067913fddd4b3d58c8dc0cc7000d482d929bc6a1d4fb501bfa8c0fa9525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7rjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168
.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-s47tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.832346 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.832582 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.832617 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.832644 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.832664 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:52Z","lastTransitionTime":"2025-11-25T09:44:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.836734 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c58a9381-bd89-4fd9-8217-f04646879968\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cd58c1df5cc30ce98fc70b6117deafa5ebeb2d1a7d7b481b62c38ab39c9290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8b504d3d81cc7b0e4741363b8a9e4c0ea3a0c1af959c8b3def222b44a646b22b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fad541e8dbab3802d7acafd3a70a54f992d4578319dc441c41ffdffdd635d63\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.850735 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.864083 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvkdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37fe6f3a-f80b-4d79-9825-8f1c67c64d5c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d18ad273888cf05de2af3469630aaf22e7e5313ab57628e0a68add8d24dfeabd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j6llt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a1b88c3a27560378821e084a6e12a5cdb549acdd447f54ea696a79f8914baad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2
099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j6llt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wvkdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.880529 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e477c71aa0332cf903853803df3a7cfda7c251a749b3f3d9226e0b6edbd34a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current 
time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.895849 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.909215 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.920695 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-v24zk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a18d0f06-2fff-4e1a-9b11-01eaea85baa1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://181a23a91023308fd2b334e8113f5e1da7b17d63f0031f4219ebd74213e936ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8gdv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126
.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-v24zk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.931895 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-7khh9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"76770e00-0d61-45ae-9772-1e8c42dc6ea6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6phx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6phx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:52Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-7khh9\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.936133 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.936173 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.936187 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.936210 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.936224 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:52Z","lastTransitionTime":"2025-11-25T09:44:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.949847 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89fbb83e7cad0ae1f970669d2cdb9bf9535d5b8e5ccec1962faa377fe263cc17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddfc773720803f36e40054cc984d270f6ff699d660c66491adb65338a52e00e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d209948
2919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.964235 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2bfc42c354557e335b5586b8aae6051c137143fdd2419a04802375587244df0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc 
kubenswrapper[4769]: I1125 09:44:52.975933 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kzpxc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb3a206d-ee72-415c-bc23-2e1b2d6f8592\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f88b14a3ffb863171ab8bbb4eb89877dfd711fadb03f45a6a337540d34c0a7d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv2k5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:37Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kzpxc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:52 crc kubenswrapper[4769]: I1125 09:44:52.990333 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27dbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46575153-4800-4ed2-8aa3-b66b98a9c899\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ed3aea46c79a1ec2872cceee652c70923be3656e45414fecb60f293639c81c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27dbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.014058 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"069c06c6-fe60-41d0-b96d-86606f55b258\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82259eae43bbd7b738e4f7fef449b797efc4d557417eb01b61a9ff1e6d59d07b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41ce53b7a0ecd75b9205774e514ba01c6852f939cff8e96fa49105f4e600702\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0893245e50828fc44a5a9f575584f675dfd48a67e31fb78734b575e30af769ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79aa5dc74b62d0d36d744a8603484ce1a5199bba790a406ffa70f76b70592e51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7459078ef54cd1bb3171908596cdc66735a96463eafbc5a0ca21e644cd934e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab25f2ae721ae8616e5ef712256055c8f8a353d898a7f2711938634d2d9fbbe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5411a17d2e0213f4117bc5ada7510b454022b5daa6adecdefdeb5540d0ab97b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5411a17d2e0213f4117bc5ada7510b454022b5daa6adecdefdeb5540d0ab97b9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:44:51Z\\\",\\\"message\\\":\\\"moved *v1.Namespace event handler 1\\\\nI1125 09:44:50.984553 6229 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 09:44:50.984560 6229 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1125 09:44:50.984879 6229 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 09:44:50.984929 6229 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 09:44:50.984942 6229 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 09:44:50.984991 6229 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 09:44:50.985033 6229 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 09:44:50.985064 6229 factory.go:656] Stopping watch factory\\\\nI1125 09:44:50.985082 6229 ovnkube.go:599] Stopped ovnkube\\\\nI1125 09:44:50.985119 6229 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 09:44:50.985146 6229 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 09:44:50.985159 6229 handler.go:208] Removed *v1.Node event handler 7\\\\nI1125 09:44:50.985166 6229 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 09:44:50.985175 6229 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 09:44:50.985188 6229 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 09:44:50.985297 6229 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:49Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-kfvzs_openshift-ovn-kubernetes(069c06c6-fe60-41d0-b96d-86606f55b258)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://010a6e6fa299b12043210d61b4ac16d7d1f4d948888065c8a9e723060e2be2b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-kfvzs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:53Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.029094 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbf363f-faf3-45da-a19f-54cc0119825c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24\\\",\\\"i
mage\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:44:25Z\\\",\\\"message\\\":\\\"W1125 09:44:15.399269 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:44:15.400911 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063855 cert, and key in /tmp/serving-cert-2102574939/serving-signer.crt, /tmp/serving-cert-2102574939/serving-signer.key\\\\nI1125 09:44:15.634430 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:44:15.637907 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:44:15.638219 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:44:15.639555 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2102574939/tls.crt::/tmp/serving-cert-2102574939/tls.key\\\\\\\"\\\\nF1125 09:44:25.945399 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:53Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.038830 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.038901 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.038917 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.038941 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.038955 4769 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:53Z","lastTransitionTime":"2025-11-25T09:44:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.141537 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.141588 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.141602 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.141639 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.141650 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:53Z","lastTransitionTime":"2025-11-25T09:44:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.236266 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.236365 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.236266 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:44:53 crc kubenswrapper[4769]: E1125 09:44:53.236512 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:44:53 crc kubenswrapper[4769]: E1125 09:44:53.236616 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:44:53 crc kubenswrapper[4769]: E1125 09:44:53.236725 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.244240 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.244280 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.244290 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.244307 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.244317 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:53Z","lastTransitionTime":"2025-11-25T09:44:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.348058 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.348113 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.348125 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.348148 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.348167 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:53Z","lastTransitionTime":"2025-11-25T09:44:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.451275 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.451326 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.451335 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.451354 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.451365 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:53Z","lastTransitionTime":"2025-11-25T09:44:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.554079 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.554123 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.554133 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.554149 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.554165 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:53Z","lastTransitionTime":"2025-11-25T09:44:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.661407 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.661472 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.661486 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.661504 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.661518 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:53Z","lastTransitionTime":"2025-11-25T09:44:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.693674 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/76770e00-0d61-45ae-9772-1e8c42dc6ea6-metrics-certs\") pod \"network-metrics-daemon-7khh9\" (UID: \"76770e00-0d61-45ae-9772-1e8c42dc6ea6\") " pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:44:53 crc kubenswrapper[4769]: E1125 09:44:53.693941 4769 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:44:53 crc kubenswrapper[4769]: E1125 09:44:53.694095 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/76770e00-0d61-45ae-9772-1e8c42dc6ea6-metrics-certs podName:76770e00-0d61-45ae-9772-1e8c42dc6ea6 nodeName:}" failed. No retries permitted until 2025-11-25 09:44:55.694069148 +0000 UTC m=+44.279041451 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/76770e00-0d61-45ae-9772-1e8c42dc6ea6-metrics-certs") pod "network-metrics-daemon-7khh9" (UID: "76770e00-0d61-45ae-9772-1e8c42dc6ea6") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.764566 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.764627 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.764640 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.764666 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.764682 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:53Z","lastTransitionTime":"2025-11-25T09:44:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.867685 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.867758 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.867780 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.867808 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.867831 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:53Z","lastTransitionTime":"2025-11-25T09:44:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.971004 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.971081 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.971120 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.971151 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:53 crc kubenswrapper[4769]: I1125 09:44:53.971173 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:53Z","lastTransitionTime":"2025-11-25T09:44:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.074383 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.074450 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.074467 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.074491 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.074510 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:54Z","lastTransitionTime":"2025-11-25T09:44:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.177593 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.177627 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.177636 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.177652 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.177662 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:54Z","lastTransitionTime":"2025-11-25T09:44:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.235933 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:44:54 crc kubenswrapper[4769]: E1125 09:44:54.236186 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.280294 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.280348 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.280369 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.280398 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.280421 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:54Z","lastTransitionTime":"2025-11-25T09:44:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.383321 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.383396 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.383423 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.383453 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.383478 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:54Z","lastTransitionTime":"2025-11-25T09:44:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.486230 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.486272 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.486282 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.486298 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.486309 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:54Z","lastTransitionTime":"2025-11-25T09:44:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.589810 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.589877 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.589892 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.589919 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.589935 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:54Z","lastTransitionTime":"2025-11-25T09:44:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.693699 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.693754 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.693764 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.693784 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.693797 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:54Z","lastTransitionTime":"2025-11-25T09:44:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.796635 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.796712 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.796730 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.796756 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.796772 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:54Z","lastTransitionTime":"2025-11-25T09:44:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.900833 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.900883 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.900892 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.900941 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:54 crc kubenswrapper[4769]: I1125 09:44:54.900954 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:54Z","lastTransitionTime":"2025-11-25T09:44:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.004039 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.004113 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.004152 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.004193 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.004220 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:55Z","lastTransitionTime":"2025-11-25T09:44:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.108310 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.108365 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.108381 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.108408 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.108423 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:55Z","lastTransitionTime":"2025-11-25T09:44:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.211707 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.211787 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.211806 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.211834 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.211853 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:55Z","lastTransitionTime":"2025-11-25T09:44:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.236191 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.236260 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.236261 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:44:55 crc kubenswrapper[4769]: E1125 09:44:55.236402 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:44:55 crc kubenswrapper[4769]: E1125 09:44:55.236608 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:44:55 crc kubenswrapper[4769]: E1125 09:44:55.236751 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.314690 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.314749 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.314760 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.314782 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.314796 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:55Z","lastTransitionTime":"2025-11-25T09:44:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.417924 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.418004 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.418016 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.418036 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.418047 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:55Z","lastTransitionTime":"2025-11-25T09:44:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.520562 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.520614 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.520624 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.520638 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.520646 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:55Z","lastTransitionTime":"2025-11-25T09:44:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.623533 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.623610 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.623628 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.623657 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.623679 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:55Z","lastTransitionTime":"2025-11-25T09:44:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.716484 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/76770e00-0d61-45ae-9772-1e8c42dc6ea6-metrics-certs\") pod \"network-metrics-daemon-7khh9\" (UID: \"76770e00-0d61-45ae-9772-1e8c42dc6ea6\") " pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:44:55 crc kubenswrapper[4769]: E1125 09:44:55.716808 4769 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:44:55 crc kubenswrapper[4769]: E1125 09:44:55.716953 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/76770e00-0d61-45ae-9772-1e8c42dc6ea6-metrics-certs podName:76770e00-0d61-45ae-9772-1e8c42dc6ea6 nodeName:}" failed. No retries permitted until 2025-11-25 09:44:59.716916435 +0000 UTC m=+48.301888908 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/76770e00-0d61-45ae-9772-1e8c42dc6ea6-metrics-certs") pod "network-metrics-daemon-7khh9" (UID: "76770e00-0d61-45ae-9772-1e8c42dc6ea6") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.726338 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.726390 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.726402 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.726424 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.726437 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:55Z","lastTransitionTime":"2025-11-25T09:44:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.829294 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.829337 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.829348 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.829362 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.829372 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:55Z","lastTransitionTime":"2025-11-25T09:44:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.933602 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.933652 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.933663 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.933686 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:55 crc kubenswrapper[4769]: I1125 09:44:55.933697 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:55Z","lastTransitionTime":"2025-11-25T09:44:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.036672 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.037196 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.037246 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.037278 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.037304 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:56Z","lastTransitionTime":"2025-11-25T09:44:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.140415 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.144398 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.144720 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.145551 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.145612 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:56Z","lastTransitionTime":"2025-11-25T09:44:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.236220 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:44:56 crc kubenswrapper[4769]: E1125 09:44:56.236377 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.247247 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.247463 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.247553 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.247650 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.247736 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:56Z","lastTransitionTime":"2025-11-25T09:44:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.349825 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.349890 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.349906 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.349931 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.349949 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:56Z","lastTransitionTime":"2025-11-25T09:44:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.452923 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.452996 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.453016 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.453042 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.453058 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:56Z","lastTransitionTime":"2025-11-25T09:44:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.555408 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.555515 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.555540 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.556254 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.556274 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:56Z","lastTransitionTime":"2025-11-25T09:44:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.660123 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.660242 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.660265 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.660336 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.660358 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:56Z","lastTransitionTime":"2025-11-25T09:44:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.764153 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.764216 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.764231 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.764254 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.764273 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:56Z","lastTransitionTime":"2025-11-25T09:44:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.867044 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.867540 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.867550 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.867572 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.867584 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:56Z","lastTransitionTime":"2025-11-25T09:44:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.970649 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.970708 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.970728 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.970761 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:56 crc kubenswrapper[4769]: I1125 09:44:56.970779 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:56Z","lastTransitionTime":"2025-11-25T09:44:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.074438 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.074475 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.074485 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.074502 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.074513 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:57Z","lastTransitionTime":"2025-11-25T09:44:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.177062 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.177118 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.177129 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.177148 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.177160 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:57Z","lastTransitionTime":"2025-11-25T09:44:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.236420 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.236489 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.236606 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:44:57 crc kubenswrapper[4769]: E1125 09:44:57.236745 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:44:57 crc kubenswrapper[4769]: E1125 09:44:57.236858 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:44:57 crc kubenswrapper[4769]: E1125 09:44:57.237112 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.280016 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.280084 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.280105 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.280135 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.280153 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:57Z","lastTransitionTime":"2025-11-25T09:44:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.383881 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.383950 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.383997 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.384023 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.384040 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:57Z","lastTransitionTime":"2025-11-25T09:44:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.487773 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.487844 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.487865 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.487890 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.487908 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:57Z","lastTransitionTime":"2025-11-25T09:44:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.590557 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.590619 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.590639 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.590663 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.590680 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:57Z","lastTransitionTime":"2025-11-25T09:44:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.694636 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.694769 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.694793 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.694850 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.694873 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:57Z","lastTransitionTime":"2025-11-25T09:44:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.798145 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.798217 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.798233 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.798259 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.798278 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:57Z","lastTransitionTime":"2025-11-25T09:44:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.902463 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.902518 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.902531 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.902549 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:57 crc kubenswrapper[4769]: I1125 09:44:57.902562 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:57Z","lastTransitionTime":"2025-11-25T09:44:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.006317 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.006365 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.006383 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.006404 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.006418 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:58Z","lastTransitionTime":"2025-11-25T09:44:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.110113 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.110166 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.110183 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.110209 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.110227 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:58Z","lastTransitionTime":"2025-11-25T09:44:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.213248 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.213311 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.213344 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.213376 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.213394 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:58Z","lastTransitionTime":"2025-11-25T09:44:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.236022 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:44:58 crc kubenswrapper[4769]: E1125 09:44:58.236278 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.298685 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.298760 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.298772 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.298793 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.298805 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:58Z","lastTransitionTime":"2025-11-25T09:44:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:44:58 crc kubenswrapper[4769]: E1125 09:44:58.314434 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2e5db0ee-b4dd-41e7-b399-6918209aec97\\\",\\\"systemUUID\\\":\\\"fb8d0e0c-b27f-49f2-81a7-fe75c397959e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:58Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.320170 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.320279 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.320344 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.320383 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.320452 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:58Z","lastTransitionTime":"2025-11-25T09:44:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:58 crc kubenswrapper[4769]: E1125 09:44:58.339483 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2e5db0ee-b4dd-41e7-b399-6918209aec97\\\",\\\"systemUUID\\\":\\\"fb8d0e0c-b27f-49f2-81a7-fe75c397959e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:58Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.345423 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.345484 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.345502 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.345528 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.345545 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:58Z","lastTransitionTime":"2025-11-25T09:44:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:58 crc kubenswrapper[4769]: E1125 09:44:58.366630 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2e5db0ee-b4dd-41e7-b399-6918209aec97\\\",\\\"systemUUID\\\":\\\"fb8d0e0c-b27f-49f2-81a7-fe75c397959e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:58Z is after 2025-08-24T17:21:41Z" Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.371832 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.371912 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.371954 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.371996 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.372044 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:58Z","lastTransitionTime":"2025-11-25T09:44:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:44:58 crc kubenswrapper[4769]: E1125 09:44:58.390946 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2e5db0ee-b4dd-41e7-b399-6918209aec97\\\",\\\"systemUUID\\\":\\\"fb8d0e0c-b27f-49f2-81a7-fe75c397959e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:58Z is after 2025-08-24T17:21:41Z"
Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.396693 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.396749 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.396766 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.396795 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.396814 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:58Z","lastTransitionTime":"2025-11-25T09:44:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:44:58 crc kubenswrapper[4769]: E1125 09:44:58.415338 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:44:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2e5db0ee-b4dd-41e7-b399-6918209aec97\\\",\\\"systemUUID\\\":\\\"fb8d0e0c-b27f-49f2-81a7-fe75c397959e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:44:58Z is after 2025-08-24T17:21:41Z"
Nov 25 09:44:58 crc kubenswrapper[4769]: E1125 09:44:58.415526 4769 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count"
Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.417861 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.417931 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.417944 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.417993 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.418015 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:58Z","lastTransitionTime":"2025-11-25T09:44:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.521189 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.521249 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.521263 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.521289 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.521304 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:58Z","lastTransitionTime":"2025-11-25T09:44:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.624909 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.624978 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.624992 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.625014 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.625027 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:58Z","lastTransitionTime":"2025-11-25T09:44:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.728632 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.728698 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.728708 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.728728 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.728741 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:58Z","lastTransitionTime":"2025-11-25T09:44:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.832428 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.832496 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.832517 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.832546 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.832568 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:58Z","lastTransitionTime":"2025-11-25T09:44:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.935979 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.936018 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.936027 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.936045 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:44:58 crc kubenswrapper[4769]: I1125 09:44:58.936084 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:58Z","lastTransitionTime":"2025-11-25T09:44:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.039230 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.039296 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.039313 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.039340 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.039360 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:59Z","lastTransitionTime":"2025-11-25T09:44:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.142842 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.142913 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.142926 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.142951 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.142998 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:59Z","lastTransitionTime":"2025-11-25T09:44:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.236202 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.236298 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:44:59 crc kubenswrapper[4769]: E1125 09:44:59.236370 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.236306 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:44:59 crc kubenswrapper[4769]: E1125 09:44:59.236532 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:44:59 crc kubenswrapper[4769]: E1125 09:44:59.236728 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.247927 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.248059 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.248089 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.248126 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.248153 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:59Z","lastTransitionTime":"2025-11-25T09:44:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.351448 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.351545 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.351572 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.351611 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.351639 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:59Z","lastTransitionTime":"2025-11-25T09:44:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.455809 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.455858 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.455868 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.455889 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.455901 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:59Z","lastTransitionTime":"2025-11-25T09:44:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.559402 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.559487 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.559505 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.559532 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.559549 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:59Z","lastTransitionTime":"2025-11-25T09:44:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.663979 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.664025 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.664040 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.664065 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.664079 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:59Z","lastTransitionTime":"2025-11-25T09:44:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.767322 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.767388 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.767401 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.767426 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.767443 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:59Z","lastTransitionTime":"2025-11-25T09:44:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.769899 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/76770e00-0d61-45ae-9772-1e8c42dc6ea6-metrics-certs\") pod \"network-metrics-daemon-7khh9\" (UID: \"76770e00-0d61-45ae-9772-1e8c42dc6ea6\") " pod="openshift-multus/network-metrics-daemon-7khh9"
Nov 25 09:44:59 crc kubenswrapper[4769]: E1125 09:44:59.771743 4769 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 25 09:44:59 crc kubenswrapper[4769]: E1125 09:44:59.771869 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/76770e00-0d61-45ae-9772-1e8c42dc6ea6-metrics-certs podName:76770e00-0d61-45ae-9772-1e8c42dc6ea6 nodeName:}" failed. No retries permitted until 2025-11-25 09:45:07.771824951 +0000 UTC m=+56.356797284 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/76770e00-0d61-45ae-9772-1e8c42dc6ea6-metrics-certs") pod "network-metrics-daemon-7khh9" (UID: "76770e00-0d61-45ae-9772-1e8c42dc6ea6") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.870205 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.870258 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.870268 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.870288 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.870301 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:59Z","lastTransitionTime":"2025-11-25T09:44:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.973523 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.973604 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.973628 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.973660 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:44:59 crc kubenswrapper[4769]: I1125 09:44:59.973683 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:44:59Z","lastTransitionTime":"2025-11-25T09:44:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.077546 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.077783 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.077806 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.077838 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.077856 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:00Z","lastTransitionTime":"2025-11-25T09:45:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.181002 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.181054 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.181065 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.181085 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.181097 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:00Z","lastTransitionTime":"2025-11-25T09:45:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.236757 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9"
Nov 25 09:45:00 crc kubenswrapper[4769]: E1125 09:45:00.236959 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6"
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.283827 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.283872 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.283887 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.283906 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.283920 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:00Z","lastTransitionTime":"2025-11-25T09:45:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.387115 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.387175 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.387187 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.387227 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.387240 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:00Z","lastTransitionTime":"2025-11-25T09:45:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.490737 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.491309 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.491400 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.491505 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.491598 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:00Z","lastTransitionTime":"2025-11-25T09:45:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.595267 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.595325 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.595339 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.595356 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.595373 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:00Z","lastTransitionTime":"2025-11-25T09:45:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.698328 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.698365 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.698375 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.698390 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.698401 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:00Z","lastTransitionTime":"2025-11-25T09:45:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.801817 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.802073 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.802187 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.802216 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.802233 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:00Z","lastTransitionTime":"2025-11-25T09:45:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.905272 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.905333 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.905346 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.905367 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:00 crc kubenswrapper[4769]: I1125 09:45:00.905383 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:00Z","lastTransitionTime":"2025-11-25T09:45:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.008070 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.008144 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.008159 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.008186 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.008200 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:01Z","lastTransitionTime":"2025-11-25T09:45:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.111420 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.111478 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.111493 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.111512 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.111524 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:01Z","lastTransitionTime":"2025-11-25T09:45:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.214789 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.214828 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.214838 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.214854 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.214864 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:01Z","lastTransitionTime":"2025-11-25T09:45:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.236626 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.236676 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.236763 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:45:01 crc kubenswrapper[4769]: E1125 09:45:01.236819 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:45:01 crc kubenswrapper[4769]: E1125 09:45:01.237117 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:45:01 crc kubenswrapper[4769]: E1125 09:45:01.237230 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.318249 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.318292 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.318303 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.318320 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.318332 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:01Z","lastTransitionTime":"2025-11-25T09:45:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.420704 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.421123 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.421193 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.421586 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.421703 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:01Z","lastTransitionTime":"2025-11-25T09:45:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.524600 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.524645 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.524654 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.524670 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.524682 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:01Z","lastTransitionTime":"2025-11-25T09:45:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.628079 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.628137 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.628161 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.628180 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.628195 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:01Z","lastTransitionTime":"2025-11-25T09:45:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.731158 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.731206 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.731216 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.731240 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.731251 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:01Z","lastTransitionTime":"2025-11-25T09:45:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.835006 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.835062 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.835074 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.835094 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.835116 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:01Z","lastTransitionTime":"2025-11-25T09:45:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.937779 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.937842 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.937860 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.937887 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:01 crc kubenswrapper[4769]: I1125 09:45:01.937907 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:01Z","lastTransitionTime":"2025-11-25T09:45:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.041748 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.041813 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.041826 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.041849 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.041890 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:02Z","lastTransitionTime":"2025-11-25T09:45:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.145614 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.145674 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.145699 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.145722 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.145737 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:02Z","lastTransitionTime":"2025-11-25T09:45:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.236222 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:45:02 crc kubenswrapper[4769]: E1125 09:45:02.236372 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.250796 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.250838 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.250856 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.250876 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.250893 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:02Z","lastTransitionTime":"2025-11-25T09:45:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.258982 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89fbb83e7cad0ae1f970669d2cdb9bf9535d5b8e5ccec1962faa377fe263cc17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddfc773720803f36e40054cc984d270f6ff699d660c66491adb65338a52e00e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:02Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.276728 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2bfc42c354557e335b5586b8aae6051c137143fdd2419a04802375587244df0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:02Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.291810 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kzpxc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb3a206d-ee72-415c-bc23-2e1b2d6f8592\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f88b14a3ffb863171ab8bbb4eb89877dfd711fadb03f45a6a337540d34c0a7d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv2k5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:37Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kzpxc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:02Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.314390 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27dbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46575153-4800-4ed2-8aa3-b66b98a9c899\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ed3aea46c79a1ec2872cceee652c70923be3656e45414fecb60f293639c81c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27dbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:02Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.340430 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"069c06c6-fe60-41d0-b96d-86606f55b258\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82259eae43bbd7b738e4f7fef449b797efc4d557417eb01b61a9ff1e6d59d07b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41ce53b7a0ecd75b9205774e514ba01c6852f939cff8e96fa49105f4e600702\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0893245e50828fc44a5a9f575584f675dfd48a67e31fb78734b575e30af769ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79aa5dc74b62d0d36d744a8603484ce1a5199bba790a406ffa70f76b70592e51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7459078ef54cd1bb3171908596cdc66735a96463eafbc5a0ca21e644cd934e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab25f2ae721ae8616e5ef712256055c8f8a353d898a7f2711938634d2d9fbbe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5411a17d2e0213f4117bc5ada7510b454022b5daa6adecdefdeb5540d0ab97b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5411a17d2e0213f4117bc5ada7510b454022b5daa6adecdefdeb5540d0ab97b9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:44:51Z\\\",\\\"message\\\":\\\"moved *v1.Namespace event handler 1\\\\nI1125 09:44:50.984553 6229 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 09:44:50.984560 6229 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1125 09:44:50.984879 6229 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 09:44:50.984929 6229 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 09:44:50.984942 6229 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 09:44:50.984991 6229 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 09:44:50.985033 6229 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 09:44:50.985064 6229 factory.go:656] Stopping watch factory\\\\nI1125 09:44:50.985082 6229 ovnkube.go:599] Stopped ovnkube\\\\nI1125 09:44:50.985119 6229 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 09:44:50.985146 6229 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 09:44:50.985159 6229 handler.go:208] Removed *v1.Node event handler 7\\\\nI1125 09:44:50.985166 6229 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 09:44:50.985175 6229 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 09:44:50.985188 6229 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 09:44:50.985297 6229 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:49Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-kfvzs_openshift-ovn-kubernetes(069c06c6-fe60-41d0-b96d-86606f55b258)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://010a6e6fa299b12043210d61b4ac16d7d1f4d948888065c8a9e723060e2be2b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-kfvzs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:02Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.353327 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.353398 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.353422 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.353452 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.353475 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:02Z","lastTransitionTime":"2025-11-25T09:45:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.359464 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbf363f-faf3-45da-a19f-54cc0119825c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:44:25Z\\\",\\\"message\\\":\\\"W1125 09:44:15.399269 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:44:15.400911 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063855 cert, and key in /tmp/serving-cert-2102574939/serving-signer.crt, /tmp/serving-cert-2102574939/serving-signer.key\\\\nI1125 09:44:15.634430 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:44:15.637907 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:44:15.638219 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:44:15.639555 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2102574939/tls.crt::/tmp/serving-cert-2102574939/tls.key\\\\\\\"\\\\nF1125 09:44:25.945399 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:02Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.373627 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d58c71b5-5dc4-45c1-9b58-9740a35d2256\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdf114b30bdda719b3e55767f9df9f9e8e3e3e9238fa8403a388d7892922563e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a47750ed21c017ef2cf17c7e8ce0676b8bd6a9fe2669c3d5adae247e557ea71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-98mzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:02Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.390507 4769 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-s47tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"025219f0-bc69-4a33-acaa-b055607272bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8283067913fddd4b3d58c8dc0cc7000d482d929bc6a1d4fb501bfa8c0fa9525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7rjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-s47tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:02Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.407656 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c58a9381-bd89-4fd9-8217-f04646879968\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cd58c1df5cc30ce98fc70b6117deafa5ebeb2d1a7d7b481b62c38ab39c9290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8b504d3d81cc7b0e4741363b8a9e4c0ea3a0c1af959c8b3def222b44a646b22b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"k
ube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fad541e8dbab3802d7acafd3a70a54f992d4578319dc441c41ffdffdd635d63\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:02Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.423423 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:02Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.438197 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvkdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37fe6f3a-f80b-4d79-9825-8f1c67c64d5c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d18ad273888cf05de2af3469630aaf22e7e5313ab57628e0a68add8d24dfeabd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j6llt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a1b88c3a27560378821e084a6e12a5cdb549acdd447f54ea696a79f8914baad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2
099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j6llt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wvkdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:02Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.456489 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.456544 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.456557 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.456577 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.456591 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:02Z","lastTransitionTime":"2025-11-25T09:45:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.457318 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e477c71aa0332cf903853803df3a7cfda7c251a749b3f3d9226e0b6edbd34a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:02Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.476193 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:02Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.493519 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:02Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.505226 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-v24zk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a18d0f06-2fff-4e1a-9b11-01eaea85baa1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://181a23a91023308fd2b334e8113f5e1da7b17d63f0031f4219ebd74213e936ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8gdv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-v24zk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:02Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.519407 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-7khh9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"76770e00-0d61-45ae-9772-1e8c42dc6ea6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6phx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6phx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:52Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-7khh9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:02Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.559151 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" 
Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.559191 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.559203 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.559221 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.559231 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:02Z","lastTransitionTime":"2025-11-25T09:45:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.661915 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.662029 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.662042 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.662064 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.662077 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:02Z","lastTransitionTime":"2025-11-25T09:45:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.765124 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.765195 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.765216 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.765252 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.765270 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:02Z","lastTransitionTime":"2025-11-25T09:45:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.868638 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.869395 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.869523 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.869597 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.869671 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:02Z","lastTransitionTime":"2025-11-25T09:45:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.913935 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:45:02 crc kubenswrapper[4769]: E1125 09:45:02.914289 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:45:02 crc kubenswrapper[4769]: E1125 09:45:02.914586 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:45:02 crc kubenswrapper[4769]: E1125 09:45:02.914604 4769 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:45:02 crc kubenswrapper[4769]: E1125 09:45:02.914671 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 09:45:34.91465292 +0000 UTC m=+83.499625233 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.972532 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.972579 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.972591 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.972606 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:02 crc kubenswrapper[4769]: I1125 09:45:02.972618 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:02Z","lastTransitionTime":"2025-11-25T09:45:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.015135 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:45:03 crc kubenswrapper[4769]: E1125 09:45:03.015311 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:45:35.015278961 +0000 UTC m=+83.600251274 (durationBeforeRetry 32s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.015458 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.015541 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.015576 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:45:03 crc kubenswrapper[4769]: E1125 09:45:03.015578 4769 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:45:03 crc kubenswrapper[4769]: E1125 09:45:03.015683 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:45:03 crc kubenswrapper[4769]: E1125 09:45:03.015699 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:45:03 crc kubenswrapper[4769]: E1125 09:45:03.015710 4769 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:45:03 crc kubenswrapper[4769]: E1125 09:45:03.015712 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:45:35.015689122 +0000 UTC m=+83.600661475 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:45:03 crc kubenswrapper[4769]: E1125 09:45:03.015619 4769 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:45:03 crc kubenswrapper[4769]: E1125 09:45:03.015745 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 09:45:35.015737453 +0000 UTC m=+83.600709766 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:45:03 crc kubenswrapper[4769]: E1125 09:45:03.015778 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:45:35.015764284 +0000 UTC m=+83.600736687 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.074824 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.074865 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.074875 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.074893 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.074903 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:03Z","lastTransitionTime":"2025-11-25T09:45:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.178744 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.178807 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.178827 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.178856 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.178876 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:03Z","lastTransitionTime":"2025-11-25T09:45:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.236501 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.236548 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.236688 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:45:03 crc kubenswrapper[4769]: E1125 09:45:03.236823 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:45:03 crc kubenswrapper[4769]: E1125 09:45:03.237047 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:45:03 crc kubenswrapper[4769]: E1125 09:45:03.237238 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.238139 4769 scope.go:117] "RemoveContainer" containerID="5411a17d2e0213f4117bc5ada7510b454022b5daa6adecdefdeb5540d0ab97b9" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.281209 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.281438 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.281500 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.281589 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.281684 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:03Z","lastTransitionTime":"2025-11-25T09:45:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.384543 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.384600 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.384613 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.384633 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.384645 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:03Z","lastTransitionTime":"2025-11-25T09:45:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.488709 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.488799 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.488841 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.488871 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.488907 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:03Z","lastTransitionTime":"2025-11-25T09:45:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.592072 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.592112 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.592123 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.592139 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.592157 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:03Z","lastTransitionTime":"2025-11-25T09:45:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.656380 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-kfvzs_069c06c6-fe60-41d0-b96d-86606f55b258/ovnkube-controller/1.log" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.659347 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" event={"ID":"069c06c6-fe60-41d0-b96d-86606f55b258","Type":"ContainerStarted","Data":"a2bedb2f61029e8dbf1a47b1b60b3321ca59a886e4794d84fa312bad3b97c2c7"} Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.659825 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.674610 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2bfc42c354557e335b5586b8aae6051c137143fdd2419a04802375587244df0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:03Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.692001 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kzpxc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb3a206d-ee72-415c-bc23-2e1b2d6f8592\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f88b14a3ffb863171ab8bbb4eb89877dfd711fadb03f45a6a337540d34c0a7d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv2k5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:37Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kzpxc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:03Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.694819 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.694850 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.694862 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.694879 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.694891 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:03Z","lastTransitionTime":"2025-11-25T09:45:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.713251 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27dbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46575153-4800-4ed2-8aa3-b66b98a9c899\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ed3aea46c79a1ec2872cceee652c70923be3656e45414fecb60f293639c81c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://42278a63
f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/e
ntrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27dbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:03Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.737649 4769 
status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"069c06c6-fe60-41d0-b96d-86606f55b258\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82259eae43bbd7b738e4f7fef449b797efc4d557417eb01b61a9ff1e6d59d07b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41ce53b7a0ecd75b9205774e514ba01c6852f939cff8e96fa49105f4e600702\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0893245e50828fc44a5a9f575584f675dfd48a67e31fb78734b575e30af769ae\\\",\\\"image\\\":\\\"quay
.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79aa5dc74b62d0d36d744a8603484ce1a5199bba790a406ffa70f76b70592e51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7459078ef54cd1bb3171908596cdc66735a96463eafbc5a0ca21e644cd934e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab25f2ae721ae8616e5ef712256055c8f8a353d898a7f2711938634d2d9fbbe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32
fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2bedb2f61029e8dbf1a47b1b60b3321ca59a886e4794d84fa312bad3b97c2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5411a17d2e0213f4117bc5ada7510b454022b5daa6adecdefdeb5540d0ab97b9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:44:51Z\\\",\\\"message\\\":\\\"moved *v1.Namespace event handler 1\\\\nI1125 09:44:50.984553 6229 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 09:44:50.984560 6229 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1125 09:44:50.984879 6229 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 09:44:50.984929 6229 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 09:44:50.984942 6229 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 09:44:50.984991 6229 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 09:44:50.985033 6229 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 09:44:50.985064 6229 factory.go:656] Stopping watch factory\\\\nI1125 09:44:50.985082 6229 ovnkube.go:599] Stopped ovnkube\\\\nI1125 09:44:50.985119 6229 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 09:44:50.985146 6229 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 09:44:50.985159 6229 handler.go:208] Removed *v1.Node event handler 7\\\\nI1125 09:44:50.985166 6229 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 09:44:50.985175 6229 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 09:44:50.985188 6229 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 09:44:50.985297 6229 
ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:49Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:45:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://010a6e6fa299b12043210d61b4ac16d7d1f4d948888065c8a9e723060e2be2b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":
[{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-kfvzs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:03Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.759649 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbf363f-faf3-45da-a19f-54cc0119825c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:44:25Z\\\",\\\"message\\\":\\\"W1125 09:44:15.399269 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:44:15.400911 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063855 cert, and key in /tmp/serving-cert-2102574939/serving-signer.crt, /tmp/serving-cert-2102574939/serving-signer.key\\\\nI1125 09:44:15.634430 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:44:15.637907 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:44:15.638219 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:44:15.639555 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2102574939/tls.crt::/tmp/serving-cert-2102574939/tls.key\\\\\\\"\\\\nF1125 09:44:25.945399 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:03Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.774572 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89fbb83e7cad0ae1f970669d2cdb9bf9535d5b8e5ccec1962faa377fe263cc17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddfc773720803f36e40054cc984d270f6ff699d660c66491adb65338a52e00e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/
ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:03Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.797825 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c58a9381-bd89-4fd9-8217-f04646879968\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cd58c1df5cc30ce98fc70b6117deafa5ebeb2d1a7d7b481b62c38ab39c9290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8b504d3d81cc7b0e4741363b8a9e4c0ea3a0c1af959c8b3def222b44a646b22b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-m
anager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fad541e8dbab3802d7acafd3a70a54f992d4578319dc441c41ffdffdd635d63\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:03Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.797897 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.798044 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.798063 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.798084 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.798095 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:03Z","lastTransitionTime":"2025-11-25T09:45:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.811171 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d58c71b5-5dc4-45c1-9b58-9740a35d2256\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdf114b30bdda719b3e55767f9df9f9e8e3e3e9238fa8403a388d7892922563e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a47750ed21c017ef2cf17c7e8ce0676b8bd6a9fe2669c3d5adae247e557ea71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-98mzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:03Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.823821 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-s47tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"025219f0-bc69-4a33-acaa-b055607272bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8283067913fddd4b3d58c8dc0cc7000d482d929bc6a1d4fb501bfa8c0fa9525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7rjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-s47tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:03Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.838643 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e477c71aa0332cf903853803df3a7cfda7c251a749b3f3d9226e0b6edbd34a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:03Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.853824 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:03Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.870604 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvkdt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37fe6f3a-f80b-4d79-9825-8f1c67c64d5c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d18ad273888cf05de2af3469630aaf22e7e5313ab57628e0a68add8d24dfeabd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j6llt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a1b88c3a27560378821e084a6e12a5cdb549acdd447f54ea696a79f8914baad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j6llt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wvkdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:03Z is after 2025-08-24T17:21:41Z" Nov 25 
09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.886895 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:03Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.900052 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-v24zk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a18d0f06-2fff-4e1a-9b11-01eaea85baa1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://181a23a91023308fd2b334e8113f5e1da7b17d63f0031f4219ebd74213e936ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8gdv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-v24zk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:03Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.901426 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.901476 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.901492 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.901517 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.901532 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:03Z","lastTransitionTime":"2025-11-25T09:45:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.913890 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-7khh9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"76770e00-0d61-45ae-9772-1e8c42dc6ea6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6phx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6phx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:52Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-7khh9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:03Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:03 crc kubenswrapper[4769]: I1125 09:45:03.928557 4769 status_manager.go:875] "Failed to update 
status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:03Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.003809 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.003853 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.003865 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.003881 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.003893 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:04Z","lastTransitionTime":"2025-11-25T09:45:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.107072 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.107130 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.107143 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.107160 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.107176 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:04Z","lastTransitionTime":"2025-11-25T09:45:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.210197 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.210531 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.210546 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.210565 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.210578 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:04Z","lastTransitionTime":"2025-11-25T09:45:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.235908 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9"
Nov 25 09:45:04 crc kubenswrapper[4769]: E1125 09:45:04.236145 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6"
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.313171 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.313221 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.313242 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.313263 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.313276 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:04Z","lastTransitionTime":"2025-11-25T09:45:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.416373 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.416432 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.416453 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.416478 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.416492 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:04Z","lastTransitionTime":"2025-11-25T09:45:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.519973 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.520043 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.520056 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.520077 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.520092 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:04Z","lastTransitionTime":"2025-11-25T09:45:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.624163 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.624239 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.624263 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.624291 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.624309 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:04Z","lastTransitionTime":"2025-11-25T09:45:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.666069 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-kfvzs_069c06c6-fe60-41d0-b96d-86606f55b258/ovnkube-controller/2.log"
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.667099 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-kfvzs_069c06c6-fe60-41d0-b96d-86606f55b258/ovnkube-controller/1.log"
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.671365 4769 generic.go:334] "Generic (PLEG): container finished" podID="069c06c6-fe60-41d0-b96d-86606f55b258" containerID="a2bedb2f61029e8dbf1a47b1b60b3321ca59a886e4794d84fa312bad3b97c2c7" exitCode=1
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.671444 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" event={"ID":"069c06c6-fe60-41d0-b96d-86606f55b258","Type":"ContainerDied","Data":"a2bedb2f61029e8dbf1a47b1b60b3321ca59a886e4794d84fa312bad3b97c2c7"}
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.671511 4769 scope.go:117] "RemoveContainer" containerID="5411a17d2e0213f4117bc5ada7510b454022b5daa6adecdefdeb5540d0ab97b9"
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.672697 4769 scope.go:117] "RemoveContainer" containerID="a2bedb2f61029e8dbf1a47b1b60b3321ca59a886e4794d84fa312bad3b97c2c7"
Nov 25 09:45:04 crc kubenswrapper[4769]: E1125 09:45:04.673258 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-kfvzs_openshift-ovn-kubernetes(069c06c6-fe60-41d0-b96d-86606f55b258)\"" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" podUID="069c06c6-fe60-41d0-b96d-86606f55b258"
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.698922 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c58a9381-bd89-4fd9-8217-f04646879968\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cd58c1df5cc30ce98fc70b6117deafa5ebeb2d1a7d7b481b62c38ab39c9290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8b504d3d81cc7b0e4741363b8a9e4c0ea3a0c1af959c8b3def222b44a646b22b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fad541e8dbab3802d7acafd3a70a54f992d4578319dc441c41ffdffdd635d63\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.713226 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d58c71b5-5dc4-45c1-9b58-9740a35d2256\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdf114b30bdda719b3e55767f9df9f9e8e3e3e9238fa8403a388d7892922563e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a47750ed21c017ef2
cf17c7e8ce0676b8bd6a9fe2669c3d5adae247e557ea71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-98mzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.727205 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-s47tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"025219f0-bc69-4a33-acaa-b055607272bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8283067913fddd4b3d58c8dc0cc7000d482d929bc6a1d4fb501bfa8c0fa9525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni
-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7rjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-s47tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.728771 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.728804 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.728814 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.728832 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.728845 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:04Z","lastTransitionTime":"2025-11-25T09:45:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.740919 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e477c71aa0332cf903853803df3a7cfda7c251a749b3f3d9226e0b6edbd34a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.752196 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.767609 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvkdt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37fe6f3a-f80b-4d79-9825-8f1c67c64d5c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d18ad273888cf05de2af3469630aaf22e7e5313ab57628e0a68add8d24dfeabd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j6llt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a1b88c3a27560378821e084a6e12a5cdb549acdd447f54ea696a79f8914baad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j6llt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wvkdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:04Z is after 2025-08-24T17:21:41Z" Nov 25 
09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.781498 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-7khh9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"76770e00-0d61-45ae-9772-1e8c42dc6ea6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6phx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6phx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:52Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-7khh9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:04Z is after 2025-08-24T17:21:41Z"
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.795711 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:04Z is after 2025-08-24T17:21:41Z"
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.811329 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:04Z is after 2025-08-24T17:21:41Z"
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.822656 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-v24zk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a18d0f06-2fff-4e1a-9b11-01eaea85baa1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://181a23a91023308fd2b334e8113f5e1da7b17d63f0031f4219ebd74213e936ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8gdv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126
.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-v24zk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.832944 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.833005 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.833017 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.833041 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.833054 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:04Z","lastTransitionTime":"2025-11-25T09:45:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.838790 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27dbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46575153-4800-4ed2-8aa3-b66b98a9c899\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ed3aea46c79a1ec2872cceee652c70923be3656e45414fecb60f293639c81c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27dbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.859430 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"069c06c6-fe60-41d0-b96d-86606f55b258\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82259eae43bbd7b738e4f7fef449b797efc4d557417eb01b61a9ff1e6d59d07b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41ce53b7a0ecd75b9205774e514ba01c6852f939cff8e96fa49105f4e600702\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0893245e50828fc44a5a9f575584f675dfd48a67e31fb78734b575e30af769ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79aa5dc74b62d0d36d744a8603484ce1a5199bba790a406ffa70f76b70592e51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7459078ef54cd1bb3171908596cdc66735a96463eafbc5a0ca21e644cd934e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab25f2ae721ae8616e5ef712256055c8f8a353d898a7f2711938634d2d9fbbe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2bedb2f61029e8dbf1a47b1b60b3321ca59a886e4794d84fa312bad3b97c2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5411a17d2e0213f4117bc5ada7510b454022b5daa6adecdefdeb5540d0ab97b9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:44:51Z\\\",\\\"message\\\":\\\"moved *v1.Namespace event handler 1\\\\nI1125 09:44:50.984553 6229 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 09:44:50.984560 6229 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1125 09:44:50.984879 6229 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 09:44:50.984929 6229 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 09:44:50.984942 6229 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 09:44:50.984991 6229 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 09:44:50.985033 6229 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 09:44:50.985064 6229 factory.go:656] Stopping watch factory\\\\nI1125 09:44:50.985082 6229 ovnkube.go:599] Stopped ovnkube\\\\nI1125 09:44:50.985119 6229 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 09:44:50.985146 6229 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 09:44:50.985159 6229 handler.go:208] Removed *v1.Node event handler 7\\\\nI1125 09:44:50.985166 6229 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 09:44:50.985175 6229 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 09:44:50.985188 6229 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 09:44:50.985297 6229 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:49Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2bedb2f61029e8dbf1a47b1b60b3321ca59a886e4794d84fa312bad3b97c2c7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:45:04Z\\\",\\\"message\\\":\\\", UUID:\\\\\\\"d937b3b3-82c3-4791-9a66-41b9fed53e9d\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", 
ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-dns-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-dns-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-dns-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.174\\\\\\\", Port:9393, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1125 09:45:04.168194 6423 obj_retry.go:303] Retry object setup: *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc\\\\nI1125 09:45:04.168194 6423 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-console/networking-console-plugin-85b44fc459-gdk6g\\\\nI1125 09\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:45:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",
\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://010a6e6fa299b12043210d61b4ac16d7d1f4d948888065c8a9e723060e2be2b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-kfvzs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.874857 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbf363f-faf3-45da-a19f-54cc0119825c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:44:25Z\\\",\\\"message\\\":\\\"W1125 09:44:15.399269 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:44:15.400911 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063855 cert, and key in /tmp/serving-cert-2102574939/serving-signer.crt, /tmp/serving-cert-2102574939/serving-signer.key\\\\nI1125 09:44:15.634430 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:44:15.637907 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:44:15.638219 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:44:15.639555 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2102574939/tls.crt::/tmp/serving-cert-2102574939/tls.key\\\\\\\"\\\\nF1125 09:44:25.945399 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:04Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.889327 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89fbb83e7cad0ae1f970669d2cdb9bf9535d5b8e5ccec1962faa377fe263cc17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddfc773720803f36e40054cc984d270f6ff699d660c66491adb65338a52e00e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/
ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:04Z is after 2025-08-24T17:21:41Z"
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.901247 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2bfc42c354557e335b5586b8aae6051c137143fdd2419a04802375587244df0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:04Z is after 2025-08-24T17:21:41Z"
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.912111 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kzpxc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb3a206d-ee72-415c-bc23-2e1b2d6f8592\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f88b14a3ffb863171ab8bbb4eb89877dfd711fadb03f45a6a337540d34c0a7d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv2k5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:37Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kzpxc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:04Z is after 2025-08-24T17:21:41Z"
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.936269 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.936308 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.936321 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.936341 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:04 crc kubenswrapper[4769]: I1125 09:45:04.936355 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:04Z","lastTransitionTime":"2025-11-25T09:45:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.039862 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.040128 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.040141 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.040164 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.040177 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:05Z","lastTransitionTime":"2025-11-25T09:45:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.142286 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.142334 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.142532 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.142549 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.142559 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:05Z","lastTransitionTime":"2025-11-25T09:45:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.236242 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.236309 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.236367 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:45:05 crc kubenswrapper[4769]: E1125 09:45:05.236559 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 09:45:05 crc kubenswrapper[4769]: E1125 09:45:05.236709 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:45:05 crc kubenswrapper[4769]: E1125 09:45:05.236822 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.245435 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.245503 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.245527 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.245558 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.245577 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:05Z","lastTransitionTime":"2025-11-25T09:45:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.349161 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.349199 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.349212 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.349238 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.349250 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:05Z","lastTransitionTime":"2025-11-25T09:45:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.452640 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.452691 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.452699 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.452713 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.452722 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:05Z","lastTransitionTime":"2025-11-25T09:45:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.556173 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.556320 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.556398 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.556430 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.556452 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:05Z","lastTransitionTime":"2025-11-25T09:45:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.660211 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.660297 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.660311 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.660360 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.660375 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:05Z","lastTransitionTime":"2025-11-25T09:45:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.676875 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-kfvzs_069c06c6-fe60-41d0-b96d-86606f55b258/ovnkube-controller/2.log"
Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.680581 4769 scope.go:117] "RemoveContainer" containerID="a2bedb2f61029e8dbf1a47b1b60b3321ca59a886e4794d84fa312bad3b97c2c7"
Nov 25 09:45:05 crc kubenswrapper[4769]: E1125 09:45:05.680723 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-kfvzs_openshift-ovn-kubernetes(069c06c6-fe60-41d0-b96d-86606f55b258)\"" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" podUID="069c06c6-fe60-41d0-b96d-86606f55b258"
Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.701420 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"069c06c6-fe60-41d0-b96d-86606f55b258\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82259eae43bbd7b738e4f7fef449b797efc4d557417eb01b61a9ff1e6d59d07b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41ce53b7a0ecd75b9205774e514ba01c6852f939cff8e96fa49105f4e600702\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0893245e50828fc44a5a9f575584f675dfd48a67e31fb78734b575e30af769ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79aa5dc74b62d0d36d744a8603484ce1a5199bba790a406ffa70f76b70592e51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7459078ef54cd1bb3171908596cdc66735a96463eafbc5a0ca21e644cd934e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab25f2ae721ae8616e5ef712256055c8f8a353d898a7f2711938634d2d9fbbe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2bedb2f61029e8dbf1a47b1b60b3321ca59a886
e4794d84fa312bad3b97c2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2bedb2f61029e8dbf1a47b1b60b3321ca59a886e4794d84fa312bad3b97c2c7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:45:04Z\\\",\\\"message\\\":\\\", UUID:\\\\\\\"d937b3b3-82c3-4791-9a66-41b9fed53e9d\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-dns-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-dns-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-dns-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.174\\\\\\\", Port:9393, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1125 09:45:04.168194 6423 obj_retry.go:303] Retry object setup: *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc\\\\nI1125 09:45:04.168194 6423 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-console/networking-console-plugin-85b44fc459-gdk6g\\\\nI1125 09\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:45:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-kfvzs_openshift-ovn-kubernetes(069c06c6-fe60-41d0-b96d-86606f55b258)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://010a6e6fa299b12043210d61b4ac16d7d1f4d948888065c8a9e723060e2be2b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-kfvzs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.716521 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbf363f-faf3-45da-a19f-54cc0119825c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24\\\",\\\"i
mage\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:44:25Z\\\",\\\"message\\\":\\\"W1125 09:44:15.399269 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:44:15.400911 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063855 cert, and key in /tmp/serving-cert-2102574939/serving-signer.crt, /tmp/serving-cert-2102574939/serving-signer.key\\\\nI1125 09:44:15.634430 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:44:15.637907 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:44:15.638219 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:44:15.639555 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2102574939/tls.crt::/tmp/serving-cert-2102574939/tls.key\\\\\\\"\\\\nF1125 09:44:25.945399 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.730288 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89fbb83e7cad0ae1f970669d2cdb9bf9535d5b8e5ccec1962faa377fe263cc17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddfc773720803f36e40054cc984d270f6ff699d660c66491adb65338a52e00e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.743219 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2bfc42c354557e335b5586b8aae6051c137143fdd2419a04802375587244df0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.757471 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kzpxc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb3a206d-ee72-415c-bc23-2e1b2d6f8592\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f88b14a3ffb863171ab8bbb4eb89877dfd711fadb03f45a6a337540d34c0a7d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv2k5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:37Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kzpxc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.762847 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.762901 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.762911 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.762935 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.762955 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:05Z","lastTransitionTime":"2025-11-25T09:45:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.777862 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27dbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46575153-4800-4ed2-8aa3-b66b98a9c899\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ed3aea46c79a1ec2872cceee652c70923be3656e45414fecb60f293639c81c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://42278a63
f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/e
ntrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27dbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.794503 4769 
status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c58a9381-bd89-4fd9-8217-f04646879968\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cd58c1df5cc30ce98fc70b6117deafa5ebeb2d1a7d7b481b62c38ab39c9290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8b504d3d81cc7b0e4741363b8a9e4c0ea3a0c1af959c8b3def222b44a646b22b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"c
ri-o://0fad541e8dbab3802d7acafd3a70a54f992d4578319dc441c41ffdffdd635d63\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.804729 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d58c71b5-5dc4-45c1-9b58-9740a35d2256\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdf114b30bdda719b3e55767f9df9f9e8e3e3e9238fa8403a388d7892922563e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\
\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a47750ed21c017ef2cf17c7e8ce0676b8bd6a9fe2669c3d5adae247e557ea71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-98mzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.816197 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-s47tv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"025219f0-bc69-4a33-acaa-b055607272bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8283067913fddd4b3d58c8dc0cc7000d482d929bc6a1d4fb501bfa8c0fa9525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7rjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-s47tv\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.829669 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e477c71aa0332cf903853803df3a7cfda7c251a749b3f3d9226e0b6edbd34a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.843371 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with 
unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.855758 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvkdt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37fe6f3a-f80b-4d79-9825-8f1c67c64d5c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d18ad273888cf05de2af3469630aaf22e7e5313ab57628e0a68add8d24dfeabd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j6llt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a1b88c3a27560378821e084a6e12a5cdb549acdd447f54ea696a79f8914baad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j6llt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wvkdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:05Z is after 2025-08-24T17:21:41Z" Nov 25 
09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.866047 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.866088 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.866096 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.866114 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.866127 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:05Z","lastTransitionTime":"2025-11-25T09:45:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.871774 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.885543 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.897697 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-v24zk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a18d0f06-2fff-4e1a-9b11-01eaea85baa1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://181a23a91023308fd2b334e8113f5e1da7b17d63f0031f4219ebd74213e936ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8gdv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-v24zk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.909839 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-7khh9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"76770e00-0d61-45ae-9772-1e8c42dc6ea6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6phx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6phx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:52Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-7khh9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.921692 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.932834 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.937223 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.952083 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.962670 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-v24zk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a18d0f06-2fff-4e1a-9b11-01eaea85baa1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://181a23a91023308fd2b334e8113f5e1da7b17d63f0031f4219ebd74213e936ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8gdv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-v24zk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.968339 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.968375 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.968385 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.968401 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.968413 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:05Z","lastTransitionTime":"2025-11-25T09:45:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.974148 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-7khh9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"76770e00-0d61-45ae-9772-1e8c42dc6ea6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6phx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6phx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:52Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-7khh9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:05 crc kubenswrapper[4769]: I1125 09:45:05.991259 4769 status_manager.go:875] "Failed to update 
status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbf363f-faf3-45da-a19f-54cc0119825c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c6
3d5636ce13b469\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:44:25Z\\\",\\\"message\\\":\\\"W1125 09:44:15.399269 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:44:15.400911 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063855 cert, and key in /tmp/serving-cert-2102574939/serving-signer.crt, /tmp/serving-cert-2102574939/serving-signer.key\\\\nI1125 09:44:15.634430 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:44:15.637907 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:44:15.638219 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:44:15.639555 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2102574939/tls.crt::/tmp/serving-cert-2102574939/tls.key\\\\\\\"\\\\nF1125 09:44:25.945399 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:05Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.009213 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89fbb83e7cad0ae1f970669d2cdb9bf9535d5b8e5ccec1962faa377fe263cc17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddfc773720803f36e40054cc984d270f6ff699d660c66491adb65338a52e00e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:06Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.023360 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2bfc42c354557e335b5586b8aae6051c137143fdd2419a04802375587244df0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:06Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.038211 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kzpxc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb3a206d-ee72-415c-bc23-2e1b2d6f8592\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f88b14a3ffb863171ab8bbb4eb89877dfd711fadb03f45a6a337540d34c0a7d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv2k5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:37Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kzpxc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:06Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.057514 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27dbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46575153-4800-4ed2-8aa3-b66b98a9c899\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ed3aea46c79a1ec2872cceee652c70923be3656e45414fecb60f293639c81c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27dbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:06Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.071139 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.071186 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:06 crc 
kubenswrapper[4769]: I1125 09:45:06.071199 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.071220 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.071236 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:06Z","lastTransitionTime":"2025-11-25T09:45:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.078579 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"069c06c6-fe60-41d0-b96d-86606f55b258\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82259eae43bbd7b738e4f7fef449b797efc4d557417eb01b61a9ff1e6d59d07b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41ce53b7a0ecd75b9205774e514ba01c6852f939cff8e96fa49105f4e600702\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0893245e50828fc44a5a9f575584f675dfd48a67e31fb78734b575e30af769ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79aa5dc74b62d0d36d744a8603484ce1a5199bba790a406ffa70f76b70592e51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7459078ef54cd1bb3171908596cdc66735a96463eafbc5a0ca21e644cd934e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab25f2ae721ae8616e5ef712256055c8f8a353d898a7f2711938634d2d9fbbe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2bedb2f61029e8dbf1a47b1b60b3321ca59a886
e4794d84fa312bad3b97c2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2bedb2f61029e8dbf1a47b1b60b3321ca59a886e4794d84fa312bad3b97c2c7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:45:04Z\\\",\\\"message\\\":\\\", UUID:\\\\\\\"d937b3b3-82c3-4791-9a66-41b9fed53e9d\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-dns-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-dns-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-dns-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.174\\\\\\\", Port:9393, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1125 09:45:04.168194 6423 obj_retry.go:303] Retry object setup: *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc\\\\nI1125 09:45:04.168194 6423 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-console/networking-console-plugin-85b44fc459-gdk6g\\\\nI1125 09\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:45:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-kfvzs_openshift-ovn-kubernetes(069c06c6-fe60-41d0-b96d-86606f55b258)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://010a6e6fa299b12043210d61b4ac16d7d1f4d948888065c8a9e723060e2be2b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-kfvzs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:06Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.090696 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c58a9381-bd89-4fd9-8217-f04646879968\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cd58c1df5cc30ce98fc70b6117deafa5ebeb2d1a7d7b481b62c38ab39c9290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8b504d3d81cc7b0e4741363b8a9e4c0ea3a0c1af959c8b3def222b44a646b22b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fad541e8dbab3802d7acafd3a70a54f992d4578319dc441c41ffdffdd635d63\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:06Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.102957 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d58c71b5-5dc4-45c1-9b58-9740a35d2256\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdf114b30bdda719b3e55767f9df9f9e8e3e3e9238fa8403a388d7892922563e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a47750ed21c017ef2cf17c7e8ce0676b8bd6a9fe2669c3d5adae247e557ea71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-98mzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:06Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.115196 4769 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-s47tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"025219f0-bc69-4a33-acaa-b055607272bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8283067913fddd4b3d58c8dc0cc7000d482d929bc6a1d4fb501bfa8c0fa9525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7rjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-s47tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:06Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.131765 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e477c71aa0332cf903853803df3a7cfda7c251a749b3f3d9226e0b6edbd34a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:06Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.144476 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:06Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.157801 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvkdt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37fe6f3a-f80b-4d79-9825-8f1c67c64d5c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d18ad273888cf05de2af3469630aaf22e7e5313ab57628e0a68add8d24dfeabd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j6llt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a1b88c3a27560378821e084a6e12a5cdb549acdd447f54ea696a79f8914baad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j6llt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wvkdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:06Z is after 2025-08-24T17:21:41Z" Nov 25 
09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.174108 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.174163 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.174173 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.174195 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.174206 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:06Z","lastTransitionTime":"2025-11-25T09:45:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.236727 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9"
Nov 25 09:45:06 crc kubenswrapper[4769]: E1125 09:45:06.236925 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6"
Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.277134 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.277214 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.277225 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.277243 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.277258 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:06Z","lastTransitionTime":"2025-11-25T09:45:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.379382 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.379440 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.379453 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.379471 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.379483 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:06Z","lastTransitionTime":"2025-11-25T09:45:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.482214 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.482275 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.482288 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.482309 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.482322 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:06Z","lastTransitionTime":"2025-11-25T09:45:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.585427 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.585480 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.585492 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.585511 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.585524 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:06Z","lastTransitionTime":"2025-11-25T09:45:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.687520 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.687577 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.687596 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.687616 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.687631 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:06Z","lastTransitionTime":"2025-11-25T09:45:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.791884 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.791932 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.791940 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.791978 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.792011 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:06Z","lastTransitionTime":"2025-11-25T09:45:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.895704 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.895756 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.895768 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.895788 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:06 crc kubenswrapper[4769]: I1125 09:45:06.895802 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:06Z","lastTransitionTime":"2025-11-25T09:45:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.004476 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.004537 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.004549 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.004572 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.004585 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:07Z","lastTransitionTime":"2025-11-25T09:45:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.107363 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.107396 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.107405 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.107419 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.107427 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:07Z","lastTransitionTime":"2025-11-25T09:45:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.209637 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.209665 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.209674 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.209687 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.209696 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:07Z","lastTransitionTime":"2025-11-25T09:45:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.236553 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.236617 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:45:07 crc kubenswrapper[4769]: E1125 09:45:07.236721 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.236562 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:45:07 crc kubenswrapper[4769]: E1125 09:45:07.236996 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:45:07 crc kubenswrapper[4769]: E1125 09:45:07.236887 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.312368 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.312426 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.312440 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.312460 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.312474 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:07Z","lastTransitionTime":"2025-11-25T09:45:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.415617 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.415718 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.415735 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.415771 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.415787 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:07Z","lastTransitionTime":"2025-11-25T09:45:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.517788 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.517823 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.517831 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.517844 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.517853 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:07Z","lastTransitionTime":"2025-11-25T09:45:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.621284 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.621347 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.621371 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.621402 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.621424 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:07Z","lastTransitionTime":"2025-11-25T09:45:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.723799 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.723854 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.723866 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.723883 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.723897 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:07Z","lastTransitionTime":"2025-11-25T09:45:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.826794 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.826843 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.826857 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.826875 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.826888 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:07Z","lastTransitionTime":"2025-11-25T09:45:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.870943 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/76770e00-0d61-45ae-9772-1e8c42dc6ea6-metrics-certs\") pod \"network-metrics-daemon-7khh9\" (UID: \"76770e00-0d61-45ae-9772-1e8c42dc6ea6\") " pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:45:07 crc kubenswrapper[4769]: E1125 09:45:07.871190 4769 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:45:07 crc kubenswrapper[4769]: E1125 09:45:07.871299 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/76770e00-0d61-45ae-9772-1e8c42dc6ea6-metrics-certs podName:76770e00-0d61-45ae-9772-1e8c42dc6ea6 nodeName:}" failed. No retries permitted until 2025-11-25 09:45:23.871278152 +0000 UTC m=+72.456250505 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/76770e00-0d61-45ae-9772-1e8c42dc6ea6-metrics-certs") pod "network-metrics-daemon-7khh9" (UID: "76770e00-0d61-45ae-9772-1e8c42dc6ea6") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.930382 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.930423 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.930435 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.930452 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:07 crc kubenswrapper[4769]: I1125 09:45:07.930466 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:07Z","lastTransitionTime":"2025-11-25T09:45:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.033765 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.033810 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.033822 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.033840 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.033852 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:08Z","lastTransitionTime":"2025-11-25T09:45:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.137331 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.137372 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.137383 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.137400 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.137412 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:08Z","lastTransitionTime":"2025-11-25T09:45:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.237101 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:45:08 crc kubenswrapper[4769]: E1125 09:45:08.237323 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.241658 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.242248 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.242412 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.242443 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.242456 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:08Z","lastTransitionTime":"2025-11-25T09:45:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.345388 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.345436 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.345447 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.345465 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.345475 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:08Z","lastTransitionTime":"2025-11-25T09:45:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.448083 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.448130 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.448147 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.448164 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.448176 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:08Z","lastTransitionTime":"2025-11-25T09:45:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.550326 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.550404 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.550432 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.550466 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.550490 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:08Z","lastTransitionTime":"2025-11-25T09:45:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.653045 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.653080 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.653090 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.653103 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.653113 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:08Z","lastTransitionTime":"2025-11-25T09:45:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.665815 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.665861 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.665876 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.665891 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.665902 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:08Z","lastTransitionTime":"2025-11-25T09:45:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:08 crc kubenswrapper[4769]: E1125 09:45:08.682741 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:08Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:08Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2e5db0ee-b4dd-41e7-b399-6918209aec97\\\",\\\"systemUUID\\\":\\\"fb8d0e0c-b27f-49f2-81a7-fe75c397959e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:08Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.686817 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.686870 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.686886 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.686908 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.686921 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:08Z","lastTransitionTime":"2025-11-25T09:45:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:08 crc kubenswrapper[4769]: E1125 09:45:08.699737 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:08Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:08Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2e5db0ee-b4dd-41e7-b399-6918209aec97\\\",\\\"systemUUID\\\":\\\"fb8d0e0c-b27f-49f2-81a7-fe75c397959e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:08Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.704318 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.704363 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.704377 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.704394 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.704409 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:08Z","lastTransitionTime":"2025-11-25T09:45:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:08 crc kubenswrapper[4769]: E1125 09:45:08.715840 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:08Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:08Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2e5db0ee-b4dd-41e7-b399-6918209aec97\\\",\\\"systemUUID\\\":\\\"fb8d0e0c-b27f-49f2-81a7-fe75c397959e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:08Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.719646 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.719695 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.719706 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.719723 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.719756 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:08Z","lastTransitionTime":"2025-11-25T09:45:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:08 crc kubenswrapper[4769]: E1125 09:45:08.732281 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:08Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:08Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2e5db0ee-b4dd-41e7-b399-6918209aec97\\\",\\\"systemUUID\\\":\\\"fb8d0e0c-b27f-49f2-81a7-fe75c397959e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:08Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.739467 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.739643 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.739658 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.739820 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.740351 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:08Z","lastTransitionTime":"2025-11-25T09:45:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:08 crc kubenswrapper[4769]: E1125 09:45:08.753679 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:08Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:08Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2e5db0ee-b4dd-41e7-b399-6918209aec97\\\",\\\"systemUUID\\\":\\\"fb8d0e0c-b27f-49f2-81a7-fe75c397959e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:08Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:08 crc kubenswrapper[4769]: E1125 09:45:08.753810 4769 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.755678 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.755705 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.755717 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.755734 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.755745 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:08Z","lastTransitionTime":"2025-11-25T09:45:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.858472 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.858556 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.858571 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.858587 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.858617 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:08Z","lastTransitionTime":"2025-11-25T09:45:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.960582 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.960615 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.960623 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.960637 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:08 crc kubenswrapper[4769]: I1125 09:45:08.960647 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:08Z","lastTransitionTime":"2025-11-25T09:45:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.063567 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.063621 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.063638 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.063658 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.063675 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:09Z","lastTransitionTime":"2025-11-25T09:45:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.165715 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.165766 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.165778 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.165797 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.165810 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:09Z","lastTransitionTime":"2025-11-25T09:45:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.236437 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.236499 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:45:09 crc kubenswrapper[4769]: E1125 09:45:09.236565 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:45:09 crc kubenswrapper[4769]: E1125 09:45:09.236629 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.236508 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:45:09 crc kubenswrapper[4769]: E1125 09:45:09.236752 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.268674 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.268725 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.268741 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.268763 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.268774 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:09Z","lastTransitionTime":"2025-11-25T09:45:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.371002 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.371056 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.371072 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.371092 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.371107 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:09Z","lastTransitionTime":"2025-11-25T09:45:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.473007 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.473043 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.473052 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.473068 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.473078 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:09Z","lastTransitionTime":"2025-11-25T09:45:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.575247 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.575287 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.575308 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.575335 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.575347 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:09Z","lastTransitionTime":"2025-11-25T09:45:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.678368 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.678428 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.678451 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.678476 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.678493 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:09Z","lastTransitionTime":"2025-11-25T09:45:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.780776 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.780850 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.780877 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.780909 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.780934 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:09Z","lastTransitionTime":"2025-11-25T09:45:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.883777 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.883914 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.883950 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.884022 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.884051 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:09Z","lastTransitionTime":"2025-11-25T09:45:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.987823 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.987941 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.988040 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.988138 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:09 crc kubenswrapper[4769]: I1125 09:45:09.988167 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:09Z","lastTransitionTime":"2025-11-25T09:45:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.090889 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.090926 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.090938 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.090972 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.090986 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:10Z","lastTransitionTime":"2025-11-25T09:45:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.193251 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.193316 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.193338 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.193368 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.193389 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:10Z","lastTransitionTime":"2025-11-25T09:45:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.236371 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:45:10 crc kubenswrapper[4769]: E1125 09:45:10.236562 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.296029 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.296101 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.296118 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.296143 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.296159 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:10Z","lastTransitionTime":"2025-11-25T09:45:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.399290 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.399370 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.399395 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.399420 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.399437 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:10Z","lastTransitionTime":"2025-11-25T09:45:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.502012 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.502049 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.502057 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.502070 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.502079 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:10Z","lastTransitionTime":"2025-11-25T09:45:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.603892 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.603934 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.603951 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.603986 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.603999 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:10Z","lastTransitionTime":"2025-11-25T09:45:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.706483 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.706538 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.706554 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.706582 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.706605 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:10Z","lastTransitionTime":"2025-11-25T09:45:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.809621 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.809696 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.809707 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.809731 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.809743 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:10Z","lastTransitionTime":"2025-11-25T09:45:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.912723 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.912804 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.912833 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.912867 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:10 crc kubenswrapper[4769]: I1125 09:45:10.912890 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:10Z","lastTransitionTime":"2025-11-25T09:45:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.015869 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.015907 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.015921 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.015939 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.015951 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:11Z","lastTransitionTime":"2025-11-25T09:45:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.118383 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.118430 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.118441 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.118458 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.118470 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:11Z","lastTransitionTime":"2025-11-25T09:45:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.221180 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.221225 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.221233 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.221248 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.221258 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:11Z","lastTransitionTime":"2025-11-25T09:45:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.236643 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.236773 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.236721 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:45:11 crc kubenswrapper[4769]: E1125 09:45:11.236880 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:45:11 crc kubenswrapper[4769]: E1125 09:45:11.237001 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:45:11 crc kubenswrapper[4769]: E1125 09:45:11.237133 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.329874 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.329995 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.330012 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.330030 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.330043 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:11Z","lastTransitionTime":"2025-11-25T09:45:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.433852 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.434943 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.434981 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.435010 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.435022 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:11Z","lastTransitionTime":"2025-11-25T09:45:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.537195 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.537245 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.537253 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.537459 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.537471 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:11Z","lastTransitionTime":"2025-11-25T09:45:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.640362 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.640421 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.640439 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.640462 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.640480 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:11Z","lastTransitionTime":"2025-11-25T09:45:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.742865 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.742903 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.742912 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.742925 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.742934 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:11Z","lastTransitionTime":"2025-11-25T09:45:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.847890 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.847943 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.847987 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.848006 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.848020 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:11Z","lastTransitionTime":"2025-11-25T09:45:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.952397 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.952460 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.952477 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.952501 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:11 crc kubenswrapper[4769]: I1125 09:45:11.952516 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:11Z","lastTransitionTime":"2025-11-25T09:45:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.055506 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.055554 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.055562 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.055582 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.055591 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:12Z","lastTransitionTime":"2025-11-25T09:45:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.158623 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.158691 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.158724 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.158765 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.158791 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:12Z","lastTransitionTime":"2025-11-25T09:45:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.235856 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:45:12 crc kubenswrapper[4769]: E1125 09:45:12.236046 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.250201 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c58a9381-bd89-4fd9-8217-f04646879968\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cd58c1df5cc30ce98fc70b6117deafa5ebeb2d1a7d7b481b62c38ab39c9290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8b504d3d81cc7b0e4741363b8a9e4c0ea3a0c1af959c8b3def222b44a646b22b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fad541e8dbab3802d7acafd3a70a54f992d4578319dc441c41ffdffdd635d63\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.261927 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.262037 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.262052 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.262074 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.262093 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:12Z","lastTransitionTime":"2025-11-25T09:45:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.265708 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d58c71b5-5dc4-45c1-9b58-9740a35d2256\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdf114b30bdda719b3e55767f9df9f9e8e3e3e9238fa8403a388d7892922563e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a47750ed21c017ef2cf17c7e8ce0676b8bd6a9fe2669c3d5adae247e557ea71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-98mzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.281230 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-s47tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"025219f0-bc69-4a33-acaa-b055607272bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8283067913fddd4b3d58c8dc0cc7000d482d929bc6a1d4fb501bfa8c0fa9525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7rjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-s47tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.296179 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e477c71aa0332cf903853803df3a7cfda7c251a749b3f3d9226e0b6edbd34a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.311323 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.326647 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvkdt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37fe6f3a-f80b-4d79-9825-8f1c67c64d5c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d18ad273888cf05de2af3469630aaf22e7e5313ab57628e0a68add8d24dfeabd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j6llt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a1b88c3a27560378821e084a6e12a5cdb549acdd447f54ea696a79f8914baad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j6llt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wvkdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:12Z is after 2025-08-24T17:21:41Z" Nov 25 
09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.347457 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.367023 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.380605 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-v24zk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a18d0f06-2fff-4e1a-9b11-01eaea85baa1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://181a23a91023308fd2b334e8113f5e1da7b17d63f0031f4219ebd74213e936ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8gdv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126
.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-v24zk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.393142 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-7khh9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"76770e00-0d61-45ae-9772-1e8c42dc6ea6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6phx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6phx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:52Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-7khh9\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.400178 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.400214 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.400225 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.400243 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.400256 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:12Z","lastTransitionTime":"2025-11-25T09:45:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.413586 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"069c06c6-fe60-41d0-b96d-86606f55b258\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82259eae43bbd7b738e4f7fef449b797efc4d557417eb01b61a9ff1e6d59d07b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41ce53b7a0ecd75b9205774e514ba01c6852f939cff8e96fa49105f4e600702\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0893245e50828fc44a5a9f575584f675dfd48a67e31fb78734b575e30af769ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79aa5dc74b62d0d36d744a8603484ce1a5199bba790a406ffa70f76b70592e51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7459078ef54cd1bb3171908596cdc66735a96463eafbc5a0ca21e644cd934e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab25f2ae721ae8616e5ef712256055c8f8a353d898a7f2711938634d2d9fbbe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2bedb2f61029e8dbf1a47b1b60b3321ca59a886
e4794d84fa312bad3b97c2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2bedb2f61029e8dbf1a47b1b60b3321ca59a886e4794d84fa312bad3b97c2c7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:45:04Z\\\",\\\"message\\\":\\\", UUID:\\\\\\\"d937b3b3-82c3-4791-9a66-41b9fed53e9d\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-dns-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-dns-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-dns-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.174\\\\\\\", Port:9393, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1125 09:45:04.168194 6423 obj_retry.go:303] Retry object setup: *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc\\\\nI1125 09:45:04.168194 6423 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-console/networking-console-plugin-85b44fc459-gdk6g\\\\nI1125 09\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:45:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-kfvzs_openshift-ovn-kubernetes(069c06c6-fe60-41d0-b96d-86606f55b258)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://010a6e6fa299b12043210d61b4ac16d7d1f4d948888065c8a9e723060e2be2b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-kfvzs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.433288 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbf363f-faf3-45da-a19f-54cc0119825c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24\\\",\\\"i
mage\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:44:25Z\\\",\\\"message\\\":\\\"W1125 09:44:15.399269 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:44:15.400911 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063855 cert, and key in /tmp/serving-cert-2102574939/serving-signer.crt, /tmp/serving-cert-2102574939/serving-signer.key\\\\nI1125 09:44:15.634430 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:44:15.637907 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:44:15.638219 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:44:15.639555 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2102574939/tls.crt::/tmp/serving-cert-2102574939/tls.key\\\\\\\"\\\\nF1125 09:44:25.945399 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.447984 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3cb8bb1-44ab-4ea0-8360-fd206e03b0bd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea75adfbd1af3891c8c83253f0cc56e3b7f0eef396bd625d683d955d9732eba6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1defc8279ef3e3bcb7b94e0ebecd9adfa114c27b4aba6b07fc9cce9933ac3eed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10cb7e7ffd92e069437d5588d6e1f2bcb3d776fe24abf845fa3554e8ef8ebacb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://676ced28573a95ae4d739e490ebdfa690170c0943a22cb1cbafdc2aeb84df172\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://676ced28573a95ae4d739e490ebdfa690170c0943a22cb1cbafdc2aeb84df172\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.465041 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89fbb83e7cad0ae1f970669d2cdb9bf9535d5b8e5ccec1962faa377fe263cc17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddfc773720803f36e40054cc984d270f6ff699d660c66491adb65338a52e00e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.481691 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2bfc42c354557e335b5586b8aae6051c137143fdd2419a04802375587244df0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.497280 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kzpxc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb3a206d-ee72-415c-bc23-2e1b2d6f8592\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f88b14a3ffb863171ab8bbb4eb89877dfd711fadb03f45a6a337540d34c0a7d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv2k5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:37Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kzpxc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.502301 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.502352 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.502364 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.502379 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.502390 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:12Z","lastTransitionTime":"2025-11-25T09:45:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.516129 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27dbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46575153-4800-4ed2-8aa3-b66b98a9c899\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ed3aea46c79a1ec2872cceee652c70923be3656e45414fecb60f293639c81c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://42278a63
f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/e
ntrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27dbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:12Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.604680 4769 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.604710 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.604719 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.604735 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.604746 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:12Z","lastTransitionTime":"2025-11-25T09:45:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.706626 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.706660 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.706670 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.706685 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.706696 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:12Z","lastTransitionTime":"2025-11-25T09:45:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.809664 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.809714 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.809730 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.809748 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.809761 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:12Z","lastTransitionTime":"2025-11-25T09:45:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.912781 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.912838 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.912851 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.912869 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:12 crc kubenswrapper[4769]: I1125 09:45:12.912883 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:12Z","lastTransitionTime":"2025-11-25T09:45:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.014811 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.014855 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.014863 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.014875 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.014884 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:13Z","lastTransitionTime":"2025-11-25T09:45:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.117717 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.117762 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.117773 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.117791 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.117802 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:13Z","lastTransitionTime":"2025-11-25T09:45:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.223541 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.223593 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.223606 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.223623 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.223632 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:13Z","lastTransitionTime":"2025-11-25T09:45:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.236628 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.236691 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.236754 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:45:13 crc kubenswrapper[4769]: E1125 09:45:13.236812 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:45:13 crc kubenswrapper[4769]: E1125 09:45:13.236922 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:45:13 crc kubenswrapper[4769]: E1125 09:45:13.237007 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.327218 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.327283 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.327301 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.327325 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.327344 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:13Z","lastTransitionTime":"2025-11-25T09:45:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.430484 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.430546 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.430561 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.430583 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.430596 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:13Z","lastTransitionTime":"2025-11-25T09:45:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.535492 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.535622 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.535638 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.535681 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.535699 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:13Z","lastTransitionTime":"2025-11-25T09:45:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.638888 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.638954 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.638999 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.639031 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.639085 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:13Z","lastTransitionTime":"2025-11-25T09:45:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.742866 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.742921 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.742931 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.742946 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.742956 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:13Z","lastTransitionTime":"2025-11-25T09:45:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.845522 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.845570 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.845582 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.845604 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.845616 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:13Z","lastTransitionTime":"2025-11-25T09:45:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.949409 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.949482 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.949497 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.949517 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:13 crc kubenswrapper[4769]: I1125 09:45:13.949532 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:13Z","lastTransitionTime":"2025-11-25T09:45:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.051936 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.052006 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.052019 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.052038 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.052050 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:14Z","lastTransitionTime":"2025-11-25T09:45:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.155209 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.155243 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.155276 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.155292 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.155302 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:14Z","lastTransitionTime":"2025-11-25T09:45:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.235829 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:45:14 crc kubenswrapper[4769]: E1125 09:45:14.235999 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.257987 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.258054 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.258068 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.258089 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.258104 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:14Z","lastTransitionTime":"2025-11-25T09:45:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.361911 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.362016 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.362042 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.362074 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.362100 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:14Z","lastTransitionTime":"2025-11-25T09:45:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.465646 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.465699 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.465719 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.465743 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.465762 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:14Z","lastTransitionTime":"2025-11-25T09:45:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.568896 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.569311 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.569546 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.569756 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.569944 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:14Z","lastTransitionTime":"2025-11-25T09:45:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.673186 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.673259 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.673279 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.673311 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.673332 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:14Z","lastTransitionTime":"2025-11-25T09:45:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.775599 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.775671 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.775686 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.775708 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.775723 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:14Z","lastTransitionTime":"2025-11-25T09:45:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.878273 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.878317 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.878329 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.878345 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.878357 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:14Z","lastTransitionTime":"2025-11-25T09:45:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.981272 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.981309 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.981319 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.981336 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:14 crc kubenswrapper[4769]: I1125 09:45:14.981346 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:14Z","lastTransitionTime":"2025-11-25T09:45:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.084794 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.084856 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.084872 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.084892 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.084904 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:15Z","lastTransitionTime":"2025-11-25T09:45:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.188073 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.188167 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.188186 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.188215 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.188240 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:15Z","lastTransitionTime":"2025-11-25T09:45:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.236147 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.236183 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:45:15 crc kubenswrapper[4769]: E1125 09:45:15.236347 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:45:15 crc kubenswrapper[4769]: E1125 09:45:15.236487 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.236759 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:45:15 crc kubenswrapper[4769]: E1125 09:45:15.236931 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.293513 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.293584 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.293597 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.293619 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.293632 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:15Z","lastTransitionTime":"2025-11-25T09:45:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.397132 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.397202 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.397215 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.397234 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.397248 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:15Z","lastTransitionTime":"2025-11-25T09:45:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.500472 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.500524 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.500534 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.500552 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.500564 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:15Z","lastTransitionTime":"2025-11-25T09:45:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.606688 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.606751 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.606763 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.606785 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.606799 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:15Z","lastTransitionTime":"2025-11-25T09:45:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.710239 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.710307 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.710322 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.710344 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.710366 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:15Z","lastTransitionTime":"2025-11-25T09:45:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.812640 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.812680 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.812695 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.812713 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.812724 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:15Z","lastTransitionTime":"2025-11-25T09:45:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.916549 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.917183 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.917448 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.917722 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:15 crc kubenswrapper[4769]: I1125 09:45:15.917929 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:15Z","lastTransitionTime":"2025-11-25T09:45:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.025457 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.025521 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.025536 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.025559 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.025577 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:16Z","lastTransitionTime":"2025-11-25T09:45:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.128428 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.128516 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.128530 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.128551 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.128565 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:16Z","lastTransitionTime":"2025-11-25T09:45:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.232589 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.233483 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.233592 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.233693 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.233775 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:16Z","lastTransitionTime":"2025-11-25T09:45:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.236859 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:45:16 crc kubenswrapper[4769]: E1125 09:45:16.237288 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.336522 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.336893 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.337050 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.337181 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.337288 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:16Z","lastTransitionTime":"2025-11-25T09:45:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.440846 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.440898 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.440907 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.440925 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.440935 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:16Z","lastTransitionTime":"2025-11-25T09:45:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.543374 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.543451 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.543463 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.543488 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.543501 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:16Z","lastTransitionTime":"2025-11-25T09:45:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.646169 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.646223 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.646237 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.646256 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.646272 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:16Z","lastTransitionTime":"2025-11-25T09:45:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.748582 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.748644 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.748657 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.748676 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.748689 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:16Z","lastTransitionTime":"2025-11-25T09:45:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.852136 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.852538 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.852628 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.852742 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.852863 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:16Z","lastTransitionTime":"2025-11-25T09:45:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.956130 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.956193 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.956206 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.956230 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:16 crc kubenswrapper[4769]: I1125 09:45:16.956245 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:16Z","lastTransitionTime":"2025-11-25T09:45:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.059956 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.060025 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.060036 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.060053 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.060066 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:17Z","lastTransitionTime":"2025-11-25T09:45:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.162325 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.162363 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.162374 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.162394 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.162405 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:17Z","lastTransitionTime":"2025-11-25T09:45:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.236459 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:45:17 crc kubenswrapper[4769]: E1125 09:45:17.236605 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.236790 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:45:17 crc kubenswrapper[4769]: E1125 09:45:17.236835 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.236938 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:45:17 crc kubenswrapper[4769]: E1125 09:45:17.237018 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.265367 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.265403 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.265416 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.265432 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.265442 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:17Z","lastTransitionTime":"2025-11-25T09:45:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.367154 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.367231 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.367255 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.367285 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.367304 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:17Z","lastTransitionTime":"2025-11-25T09:45:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.470111 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.470156 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.470166 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.470185 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.470197 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:17Z","lastTransitionTime":"2025-11-25T09:45:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.573592 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.573644 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.573654 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.573672 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.573684 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:17Z","lastTransitionTime":"2025-11-25T09:45:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.677001 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.677398 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.677593 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.677801 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.678075 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:17Z","lastTransitionTime":"2025-11-25T09:45:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.781617 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.781685 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.781701 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.781718 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.781731 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:17Z","lastTransitionTime":"2025-11-25T09:45:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.884848 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.884888 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.884900 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.884917 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.884928 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:17Z","lastTransitionTime":"2025-11-25T09:45:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.987995 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.988050 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.988063 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.988083 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:17 crc kubenswrapper[4769]: I1125 09:45:17.988096 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:17Z","lastTransitionTime":"2025-11-25T09:45:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.092119 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.092170 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.092179 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.092198 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.092209 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:18Z","lastTransitionTime":"2025-11-25T09:45:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.194613 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.194925 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.195071 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.195163 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.195254 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:18Z","lastTransitionTime":"2025-11-25T09:45:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.236567 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:45:18 crc kubenswrapper[4769]: E1125 09:45:18.236754 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.298164 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.298230 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.298242 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.298268 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.298290 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:18Z","lastTransitionTime":"2025-11-25T09:45:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.402502 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.402858 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.402931 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.403037 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.403116 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:18Z","lastTransitionTime":"2025-11-25T09:45:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.505577 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.505637 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.505651 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.505670 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.505685 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:18Z","lastTransitionTime":"2025-11-25T09:45:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.608744 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.608797 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.608810 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.608831 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.608844 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:18Z","lastTransitionTime":"2025-11-25T09:45:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.710836 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.710878 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.710889 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.710905 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.710915 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:18Z","lastTransitionTime":"2025-11-25T09:45:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.814499 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.814555 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.814566 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.814586 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.814599 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:18Z","lastTransitionTime":"2025-11-25T09:45:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.917916 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.918639 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.918709 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.918815 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.918913 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:18Z","lastTransitionTime":"2025-11-25T09:45:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.921382 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.921435 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.921445 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.921466 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.921480 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:18Z","lastTransitionTime":"2025-11-25T09:45:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:18 crc kubenswrapper[4769]: E1125 09:45:18.943056 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:18Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:18Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2e5db0ee-b4dd-41e7-b399-6918209aec97\\\",\\\"systemUUID\\\":\\\"fb8d0e0c-b27f-49f2-81a7-fe75c397959e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:18Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.947802 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.947859 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.947881 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.947905 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.947923 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:18Z","lastTransitionTime":"2025-11-25T09:45:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:18 crc kubenswrapper[4769]: E1125 09:45:18.963007 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:18Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:18Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2e5db0ee-b4dd-41e7-b399-6918209aec97\\\",\\\"systemUUID\\\":\\\"fb8d0e0c-b27f-49f2-81a7-fe75c397959e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:18Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.967910 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.967984 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.968001 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.968020 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.968033 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:18Z","lastTransitionTime":"2025-11-25T09:45:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:18 crc kubenswrapper[4769]: E1125 09:45:18.980723 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:18Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:18Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2e5db0ee-b4dd-41e7-b399-6918209aec97\\\",\\\"systemUUID\\\":\\\"fb8d0e0c-b27f-49f2-81a7-fe75c397959e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:18Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.986591 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.986682 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.986706 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.986736 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:18 crc kubenswrapper[4769]: I1125 09:45:18.986761 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:18Z","lastTransitionTime":"2025-11-25T09:45:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:19 crc kubenswrapper[4769]: E1125 09:45:19.004418 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:18Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:18Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2e5db0ee-b4dd-41e7-b399-6918209aec97\\\",\\\"systemUUID\\\":\\\"fb8d0e0c-b27f-49f2-81a7-fe75c397959e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:19Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.008886 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.008918 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.008929 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.008947 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.008972 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:19Z","lastTransitionTime":"2025-11-25T09:45:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:19 crc kubenswrapper[4769]: E1125 09:45:19.023561 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2e5db0ee-b4dd-41e7-b399-6918209aec97\\\",\\\"systemUUID\\\":\\\"fb8d0e0c-b27f-49f2-81a7-fe75c397959e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:19Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:19 crc kubenswrapper[4769]: E1125 09:45:19.023723 4769 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.025904 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
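Note on the two failed status patches above: the API server rejects kubelet's node-status PATCH because it must first consult the node.network-node-identity.openshift.io validating webhook at https://127.0.0.1:9743, and that TLS handshake fails. The webhook's serving certificate expired on 2025-08-24T17:21:41Z while the node clock reads 2025-11-25T09:45:19Z. Kubelet retries the patch a small fixed number of times, then logs "update node status exceeds retry count" and moves on. A minimal sketch (not kubelet code) of the validity-window check that x509 verification performs, with a hypothetical certificate path:

// cert-window.go: report whether a PEM certificate is inside its validity window.
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	// Hypothetical path; point it at the webhook's serving certificate.
	data, err := os.ReadFile("/path/to/serving-cert.pem")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	block, _ := pem.Decode(data)
	if block == nil {
		fmt.Fprintln(os.Stderr, "no PEM block found")
		os.Exit(1)
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	now := time.Now().UTC()
	fmt.Printf("NotBefore=%s NotAfter=%s now=%s\n", cert.NotBefore, cert.NotAfter, now)
	// This is the condition behind "x509: certificate has expired or is not yet valid".
	if now.Before(cert.NotBefore) || now.After(cert.NotAfter) {
		fmt.Println("certificate is outside its validity window")
	}
}

Until the cluster rotates this certificate, every node and pod status patch in this log will keep failing the same way (the pod.network-node-identity.openshift.io entries later show the identical error).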
event="NodeHasSufficientMemory" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.025995 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.026008 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.026026 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.026037 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:19Z","lastTransitionTime":"2025-11-25T09:45:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.129250 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.129300 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.129313 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.129332 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.129343 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:19Z","lastTransitionTime":"2025-11-25T09:45:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.233526 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.233588 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.233604 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.233629 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.233642 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:19Z","lastTransitionTime":"2025-11-25T09:45:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.236761 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.236838 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.236931 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:45:19 crc kubenswrapper[4769]: E1125 09:45:19.236951 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:45:19 crc kubenswrapper[4769]: E1125 09:45:19.237090 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:45:19 crc kubenswrapper[4769]: E1125 09:45:19.237214 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.336059 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.336098 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.336106 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.336123 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.336132 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:19Z","lastTransitionTime":"2025-11-25T09:45:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.439591 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.439645 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.439662 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.439686 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.439700 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:19Z","lastTransitionTime":"2025-11-25T09:45:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.543033 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.543074 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.543091 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.543110 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.543123 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:19Z","lastTransitionTime":"2025-11-25T09:45:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.647248 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.647324 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.647346 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.647381 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.647404 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:19Z","lastTransitionTime":"2025-11-25T09:45:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.750448 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.750817 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.750948 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.751120 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.751237 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:19Z","lastTransitionTime":"2025-11-25T09:45:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.860108 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.860458 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.860520 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.860581 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.860647 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:19Z","lastTransitionTime":"2025-11-25T09:45:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.963637 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.963769 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.963782 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.963822 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:19 crc kubenswrapper[4769]: I1125 09:45:19.963833 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:19Z","lastTransitionTime":"2025-11-25T09:45:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.066400 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.066824 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.066906 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.067005 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.067074 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:20Z","lastTransitionTime":"2025-11-25T09:45:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.170221 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.170269 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.170284 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.170304 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.170318 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:20Z","lastTransitionTime":"2025-11-25T09:45:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.235986 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:45:20 crc kubenswrapper[4769]: E1125 09:45:20.236179 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
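The NodeNotReady churn above has a single cause, stated in the condition message itself: the container runtime reports NetworkReady=false because no CNI configuration file exists yet in /etc/kubernetes/cni/net.d/, and the OVN-Kubernetes pods that would write it are themselves still restarting. A rough sketch of that readiness probe, assuming the conventional config extensions (the real scanning logic lives in libcni and the runtime):

// cni-check.go: report whether any CNI network configuration is present.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	dir := "/etc/kubernetes/cni/net.d" // directory named in the kubelet message
	entries, err := os.ReadDir(dir)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	found := false
	for _, e := range entries {
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json": // extensions libcni scans for
			fmt.Println("found CNI config:", filepath.Join(dir, e.Name()))
			found = true
		}
	}
	if !found {
		fmt.Println("no CNI configuration file in", dir, "- network stays NotReady")
	}
}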
pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.237612 4769 scope.go:117] "RemoveContainer" containerID="a2bedb2f61029e8dbf1a47b1b60b3321ca59a886e4794d84fa312bad3b97c2c7" Nov 25 09:45:20 crc kubenswrapper[4769]: E1125 09:45:20.238085 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-kfvzs_openshift-ovn-kubernetes(069c06c6-fe60-41d0-b96d-86606f55b258)\"" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.273244 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.273293 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.273302 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.273320 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.273331 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:20Z","lastTransitionTime":"2025-11-25T09:45:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.375905 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.375954 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.375982 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.376002 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.376013 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:20Z","lastTransitionTime":"2025-11-25T09:45:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.478596 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.478673 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.478687 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.478705 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.478716 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:20Z","lastTransitionTime":"2025-11-25T09:45:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.581515 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.581570 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.581581 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.581597 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.581609 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:20Z","lastTransitionTime":"2025-11-25T09:45:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.684599 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.684667 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.684685 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.684715 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.684731 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:20Z","lastTransitionTime":"2025-11-25T09:45:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.787627 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.787695 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.787712 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.787739 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.787758 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:20Z","lastTransitionTime":"2025-11-25T09:45:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.890799 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.890842 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.890851 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.890872 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.890882 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:20Z","lastTransitionTime":"2025-11-25T09:45:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.993572 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.993619 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.993630 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.993646 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:20 crc kubenswrapper[4769]: I1125 09:45:20.993658 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:20Z","lastTransitionTime":"2025-11-25T09:45:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.095687 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.095742 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.095755 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.095779 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.095791 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:21Z","lastTransitionTime":"2025-11-25T09:45:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.198343 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.198393 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.198405 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.198422 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.198437 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:21Z","lastTransitionTime":"2025-11-25T09:45:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.236185 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.236188 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:45:21 crc kubenswrapper[4769]: E1125 09:45:21.236379 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:45:21 crc kubenswrapper[4769]: E1125 09:45:21.236496 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.236188 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:45:21 crc kubenswrapper[4769]: E1125 09:45:21.236607 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.301126 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.301194 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.301203 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.301220 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.301230 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:21Z","lastTransitionTime":"2025-11-25T09:45:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.403951 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.404033 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.404049 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.404077 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.404097 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:21Z","lastTransitionTime":"2025-11-25T09:45:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.507393 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.507445 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.507473 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.507500 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.507513 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:21Z","lastTransitionTime":"2025-11-25T09:45:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.610114 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.610390 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.610450 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.610575 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.610638 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:21Z","lastTransitionTime":"2025-11-25T09:45:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.714116 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.714174 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.714192 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.714216 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.714233 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:21Z","lastTransitionTime":"2025-11-25T09:45:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.816303 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.816625 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.816843 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.817011 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.817121 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:21Z","lastTransitionTime":"2025-11-25T09:45:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.919567 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.919873 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.920014 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.920093 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:21 crc kubenswrapper[4769]: I1125 09:45:21.920153 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:21Z","lastTransitionTime":"2025-11-25T09:45:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.022737 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.023116 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.023213 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.023293 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.023369 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:22Z","lastTransitionTime":"2025-11-25T09:45:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.126044 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.126083 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.126091 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.126125 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.126138 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:22Z","lastTransitionTime":"2025-11-25T09:45:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.229186 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.229228 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.229239 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.229254 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.229264 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:22Z","lastTransitionTime":"2025-11-25T09:45:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.236568 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:45:22 crc kubenswrapper[4769]: E1125 09:45:22.236693 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.256254 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e477c71aa0332cf903853803df3a7cfda7c251a749b3f3d9226e0b6edbd34a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:22Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.268733 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:22Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.281491 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvkdt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37fe6f3a-f80b-4d79-9825-8f1c67c64d5c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d18ad273888cf05de2af3469630aaf22e7e5313ab57628e0a68add8d24dfeabd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j6llt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a1b88c3a27560378821e084a6e12a5cdb549acdd447f54ea696a79f8914baad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j6llt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wvkdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:22Z is after 2025-08-24T17:21:41Z" Nov 25 
09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.293124 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-v24zk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a18d0f06-2fff-4e1a-9b11-01eaea85baa1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://181a23a91023308fd2b334e8113f5e1da7b17d63f0031f4219ebd74213e936ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8gdv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-v24zk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:22Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.306756 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-7khh9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"76770e00-0d61-45ae-9772-1e8c42dc6ea6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6phx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6phx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:52Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-7khh9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:22Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.319308 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:22Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.331901 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:22Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.332157 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.332186 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.332197 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.332214 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.332224 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:22Z","lastTransitionTime":"2025-11-25T09:45:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.344932 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kzpxc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb3a206d-ee72-415c-bc23-2e1b2d6f8592\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f88b14a3ffb863171ab8bbb4eb89877dfd711fadb03f45a6a337540d34c0a7d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv2k5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:37Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kzpxc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:22Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.360423 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27dbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46575153-4800-4ed2-8aa3-b66b98a9c899\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ed3aea46c79a1ec2872cceee652c70923be3656e45414fecb60f293639c81c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27dbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:22Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.378929 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"069c06c6-fe60-41d0-b96d-86606f55b258\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82259eae43bbd7b738e4f7fef449b797efc4d557417eb01b61a9ff1e6d59d07b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41ce53b7a0ecd75b9205774e514ba01c6852f939cff8e96fa49105f4e600702\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0893245e50828fc44a5a9f575584f675dfd48a67e31fb78734b575e30af769ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79aa5dc74b62d0d36d744a8603484ce1a5199bba790a406ffa70f76b70592e51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7459078ef54cd1bb3171908596cdc66735a96463eafbc5a0ca21e644cd934e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab25f2ae721ae8616e5ef712256055c8f8a353d898a7f2711938634d2d9fbbe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2bedb2f61029e8dbf1a47b1b60b3321ca59a886e4794d84fa312bad3b97c2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2bedb2f61029e8dbf1a47b1b60b3321ca59a886e4794d84fa312bad3b97c2c7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:45:04Z\\\",\\\"message\\\":\\\", UUID:\\\\\\\"d937b3b3-82c3-4791-9a66-41b9fed53e9d\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-dns-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-dns-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-dns-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.174\\\\\\\", Port:9393, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1125 09:45:04.168194 6423 obj_retry.go:303] Retry object setup: *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc\\\\nI1125 09:45:04.168194 6423 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-console/networking-console-plugin-85b44fc459-gdk6g\\\\nI1125 09\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:45:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-kfvzs_openshift-ovn-kubernetes(069c06c6-fe60-41d0-b96d-86606f55b258)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://010a6e6fa299b12043210d61b4ac16d7d1f4d948888065c8a9e723060e2be2b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-kfvzs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:22Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.394727 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbf363f-faf3-45da-a19f-54cc0119825c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24\\\",\\\"i
mage\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:44:25Z\\\",\\\"message\\\":\\\"W1125 09:44:15.399269 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:44:15.400911 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063855 cert, and key in /tmp/serving-cert-2102574939/serving-signer.crt, /tmp/serving-cert-2102574939/serving-signer.key\\\\nI1125 09:44:15.634430 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:44:15.637907 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:44:15.638219 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:44:15.639555 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2102574939/tls.crt::/tmp/serving-cert-2102574939/tls.key\\\\\\\"\\\\nF1125 09:44:25.945399 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:22Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.407300 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3cb8bb1-44ab-4ea0-8360-fd206e03b0bd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea75adfbd1af3891c8c83253f0cc56e3b7f0eef396bd625d683d955d9732eba6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1defc8279ef3e3bcb7b94e0ebecd9adfa114c27b4aba6b07fc9cce9933ac3eed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10cb7e7ffd92e069437d5588d6e1f2bcb3d776fe24abf845fa3554e8ef8ebacb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://676ced28573a95ae4d739e490ebdfa690170c0943a22cb1cbafdc2aeb84df172\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://676ced28573a95ae4d739e490ebdfa690170c0943a22cb1cbafdc2aeb84df172\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:22Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.420271 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89fbb83e7cad0ae1f970669d2cdb9bf9535d5b8e5ccec1962faa377fe263cc17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddfc773720803f36e40054cc984d270f6ff699d660c66491adb65338a52e00e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:22Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.430447 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2bfc42c354557e335b5586b8aae6051c137143fdd2419a04802375587244df0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:22Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.435028 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.435062 4769 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.435071 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.435089 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.435101 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:22Z","lastTransitionTime":"2025-11-25T09:45:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.445496 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c58a9381-bd89-4fd9-8217-f04646879968\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cd58c1df5cc30ce98fc70b6117deafa5ebeb2d1a7d7b481b62c38ab39c9290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"
mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8b504d3d81cc7b0e4741363b8a9e4c0ea3a0c1af959c8b3def222b44a646b22b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fad541e8dbab3802d7acafd3a70a54f992d4578319dc441c41ffdffdd635d63\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:22Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.457641 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d58c71b5-5dc4-45c1-9b58-9740a35d2256\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdf114b30bdda719b3e55767f9df9f9e8e3e3e9238fa8403a388d7892922563e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a47750ed21c017ef2cf17c7e8ce0676b8bd6a9fe2669c3d5adae247e557ea71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-98mzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:22Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.469394 4769 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-s47tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"025219f0-bc69-4a33-acaa-b055607272bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8283067913fddd4b3d58c8dc0cc7000d482d929bc6a1d4fb501bfa8c0fa9525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7rjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-s47tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:22Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.537639 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.537679 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.537688 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.537702 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.537712 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:22Z","lastTransitionTime":"2025-11-25T09:45:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.641738 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.641788 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.641801 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.641819 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.641832 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:22Z","lastTransitionTime":"2025-11-25T09:45:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.743907 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.743946 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.743983 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.743999 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.744011 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:22Z","lastTransitionTime":"2025-11-25T09:45:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.846444 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.846491 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.846500 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.846519 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.846529 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:22Z","lastTransitionTime":"2025-11-25T09:45:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.949112 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.949159 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.949168 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.949187 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:22 crc kubenswrapper[4769]: I1125 09:45:22.949198 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:22Z","lastTransitionTime":"2025-11-25T09:45:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.052896 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.053245 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.053373 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.053441 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.053509 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:23Z","lastTransitionTime":"2025-11-25T09:45:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.157324 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.157378 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.157391 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.157410 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.157423 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:23Z","lastTransitionTime":"2025-11-25T09:45:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.236856 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.237012 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:45:23 crc kubenswrapper[4769]: E1125 09:45:23.237047 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:45:23 crc kubenswrapper[4769]: E1125 09:45:23.237236 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.237271 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:45:23 crc kubenswrapper[4769]: E1125 09:45:23.237319 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.260678 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.260726 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.260736 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.260753 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.260764 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:23Z","lastTransitionTime":"2025-11-25T09:45:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.363886 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.363934 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.363944 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.363984 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.363997 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:23Z","lastTransitionTime":"2025-11-25T09:45:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.467251 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.467301 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.467316 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.467335 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.467348 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:23Z","lastTransitionTime":"2025-11-25T09:45:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.570727 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.570806 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.570825 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.570856 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.570875 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:23Z","lastTransitionTime":"2025-11-25T09:45:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.674226 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.674282 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.674301 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.674332 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.674352 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:23Z","lastTransitionTime":"2025-11-25T09:45:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.776686 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.776750 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.776768 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.776795 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.776815 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:23Z","lastTransitionTime":"2025-11-25T09:45:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.880232 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.880286 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.880297 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.880317 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.880328 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:23Z","lastTransitionTime":"2025-11-25T09:45:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.963316 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/76770e00-0d61-45ae-9772-1e8c42dc6ea6-metrics-certs\") pod \"network-metrics-daemon-7khh9\" (UID: \"76770e00-0d61-45ae-9772-1e8c42dc6ea6\") " pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:45:23 crc kubenswrapper[4769]: E1125 09:45:23.963544 4769 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:45:23 crc kubenswrapper[4769]: E1125 09:45:23.963641 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/76770e00-0d61-45ae-9772-1e8c42dc6ea6-metrics-certs podName:76770e00-0d61-45ae-9772-1e8c42dc6ea6 nodeName:}" failed. No retries permitted until 2025-11-25 09:45:55.963618573 +0000 UTC m=+104.548590886 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/76770e00-0d61-45ae-9772-1e8c42dc6ea6-metrics-certs") pod "network-metrics-daemon-7khh9" (UID: "76770e00-0d61-45ae-9772-1e8c42dc6ea6") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.983127 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.983180 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.983191 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.983210 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:23 crc kubenswrapper[4769]: I1125 09:45:23.983222 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:23Z","lastTransitionTime":"2025-11-25T09:45:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.085911 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.085953 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.085987 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.086003 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.086013 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:24Z","lastTransitionTime":"2025-11-25T09:45:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.188788 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.188849 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.188860 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.189017 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.189030 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:24Z","lastTransitionTime":"2025-11-25T09:45:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.236422 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:45:24 crc kubenswrapper[4769]: E1125 09:45:24.236616 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.291655 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.291689 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.291698 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.291714 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.291724 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:24Z","lastTransitionTime":"2025-11-25T09:45:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.394265 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.394299 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.394308 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.394323 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.394331 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:24Z","lastTransitionTime":"2025-11-25T09:45:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.496832 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.496874 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.496884 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.496902 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.496912 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:24Z","lastTransitionTime":"2025-11-25T09:45:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.599807 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.599850 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.599861 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.599881 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.599893 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:24Z","lastTransitionTime":"2025-11-25T09:45:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.702444 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.702493 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.702507 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.702526 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.702543 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:24Z","lastTransitionTime":"2025-11-25T09:45:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.805400 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.805816 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.805935 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.806053 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.806160 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:24Z","lastTransitionTime":"2025-11-25T09:45:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.908867 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.908921 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.908936 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.908983 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:24 crc kubenswrapper[4769]: I1125 09:45:24.909001 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:24Z","lastTransitionTime":"2025-11-25T09:45:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.011765 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.011810 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.011819 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.011836 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.011846 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:25Z","lastTransitionTime":"2025-11-25T09:45:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.114774 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.114853 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.114871 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.114893 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.114908 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:25Z","lastTransitionTime":"2025-11-25T09:45:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.218048 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.218105 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.218118 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.218138 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.218152 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:25Z","lastTransitionTime":"2025-11-25T09:45:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.236814 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.236864 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.237047 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:45:25 crc kubenswrapper[4769]: E1125 09:45:25.237043 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 09:45:25 crc kubenswrapper[4769]: E1125 09:45:25.237203 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:45:25 crc kubenswrapper[4769]: E1125 09:45:25.237280 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.258877 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"]
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.320878 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.320934 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.320944 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.320984 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.320997 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:25Z","lastTransitionTime":"2025-11-25T09:45:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.424310 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.424347 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.424357 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.424373 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.424383 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:25Z","lastTransitionTime":"2025-11-25T09:45:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.527281 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.527327 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.527338 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.527353 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.527363 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:25Z","lastTransitionTime":"2025-11-25T09:45:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.630303 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.630372 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.630389 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.630416 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.630437 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:25Z","lastTransitionTime":"2025-11-25T09:45:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.733943 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.734035 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.734052 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.734083 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.734103 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:25Z","lastTransitionTime":"2025-11-25T09:45:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.836825 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.836889 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.836909 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.836931 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.836946 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:25Z","lastTransitionTime":"2025-11-25T09:45:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.939112 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.939172 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.939191 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.939217 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:25 crc kubenswrapper[4769]: I1125 09:45:25.939236 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:25Z","lastTransitionTime":"2025-11-25T09:45:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.043364 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.043409 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.043420 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.043440 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.043453 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:26Z","lastTransitionTime":"2025-11-25T09:45:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.147090 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.147410 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.147580 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.147714 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.147837 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:26Z","lastTransitionTime":"2025-11-25T09:45:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.237107 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9"
Nov 25 09:45:26 crc kubenswrapper[4769]: E1125 09:45:26.237478 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6"
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.251397 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.251800 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.251914 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.252054 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.252195 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:26Z","lastTransitionTime":"2025-11-25T09:45:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.356091 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.356155 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.356181 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.356210 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.356233 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:26Z","lastTransitionTime":"2025-11-25T09:45:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.459329 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.459388 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.459403 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.459422 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.459436 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:26Z","lastTransitionTime":"2025-11-25T09:45:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.561918 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.561951 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.561981 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.561996 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.562005 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:26Z","lastTransitionTime":"2025-11-25T09:45:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.664596 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.664637 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.664647 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.664666 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.664678 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:26Z","lastTransitionTime":"2025-11-25T09:45:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.762152 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-s47tv_025219f0-bc69-4a33-acaa-b055607272bb/kube-multus/0.log"
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.762214 4769 generic.go:334] "Generic (PLEG): container finished" podID="025219f0-bc69-4a33-acaa-b055607272bb" containerID="b8283067913fddd4b3d58c8dc0cc7000d482d929bc6a1d4fb501bfa8c0fa9525" exitCode=1
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.762257 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-s47tv" event={"ID":"025219f0-bc69-4a33-acaa-b055607272bb","Type":"ContainerDied","Data":"b8283067913fddd4b3d58c8dc0cc7000d482d929bc6a1d4fb501bfa8c0fa9525"}
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.763262 4769 scope.go:117] "RemoveContainer" containerID="b8283067913fddd4b3d58c8dc0cc7000d482d929bc6a1d4fb501bfa8c0fa9525"
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.768065 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.768118 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.768133 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.768153 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.768165 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:26Z","lastTransitionTime":"2025-11-25T09:45:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.783132 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2bfc42c354557e335b5586b8aae6051c137143fdd2419a04802375587244df0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:26Z is after 2025-08-24T17:21:41Z"
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.800989 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kzpxc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb3a206d-ee72-415c-bc23-2e1b2d6f8592\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f88b14a3ffb863171ab8bbb4eb89877dfd711fadb03f45a6a337540d34c0a7d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv2k5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:37Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kzpxc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:26Z is after 2025-08-24T17:21:41Z"
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.823829 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27dbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46575153-4800-4ed2-8aa3-b66b98a9c899\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ed3aea46c79a1ec2872cceee652c70923be3656e45414fecb60f293639c81c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27dbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:26Z is after 2025-08-24T17:21:41Z"
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.851516 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"069c06c6-fe60-41d0-b96d-86606f55b258\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82259eae43bbd7b738e4f7fef449b797efc4d557417eb01b61a9ff1e6d59d07b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41ce53b7a0ecd75b9205774e514ba01c6852f939cff8e96fa49105f4e600702\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0893245e50828fc44a5a9f575584f675dfd48a67e31fb78734b575e30af769ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79aa5dc74b62d0d36d744a8603484ce1a5199bba790a406ffa70f76b70592e51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7459078ef54cd1bb3171908596cdc66735a96463eafbc5a0ca21e644cd934e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab25f2ae721ae8616e5ef712256055c8f8a353d898a7f2711938634d2d9fbbe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2bedb2f61029e8dbf1a47b1b60b3321ca59a886e4794d84fa312bad3b97c2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2bedb2f61029e8dbf1a47b1b60b3321ca59a886e4794d84fa312bad3b97c2c7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:45:04Z\\\",\\\"message\\\":\\\", UUID:\\\\\\\"d937b3b3-82c3-4791-9a66-41b9fed53e9d\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-dns-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-dns-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-dns-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.174\\\\\\\", Port:9393, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1125 09:45:04.168194 6423 obj_retry.go:303] Retry object setup: *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc\\\\nI1125 09:45:04.168194 6423 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-console/networking-console-plugin-85b44fc459-gdk6g\\\\nI1125 09\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:45:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-kfvzs_openshift-ovn-kubernetes(069c06c6-fe60-41d0-b96d-86606f55b258)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://010a6e6fa299b12043210d61b4ac16d7d1f4d948888065c8a9e723060e2be2b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-kfvzs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:26Z is after 2025-08-24T17:21:41Z"
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.868529 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbf363f-faf3-45da-a19f-54cc0119825c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:44:25Z\\\",\\\"message\\\":\\\"W1125 09:44:15.399269 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:44:15.400911 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063855 cert, and key in /tmp/serving-cert-2102574939/serving-signer.crt, /tmp/serving-cert-2102574939/serving-signer.key\\\\nI1125 09:44:15.634430 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:44:15.637907 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:44:15.638219 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:44:15.639555 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2102574939/tls.crt::/tmp/serving-cert-2102574939/tls.key\\\\\\\"\\\\nF1125 09:44:25.945399 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:26Z is after 2025-08-24T17:21:41Z"
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.871138 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.871172 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.871184 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.871217 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.871236 4769 setters.go:603] "Node became not ready" node="crc"
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:26Z","lastTransitionTime":"2025-11-25T09:45:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.885573 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3cb8bb1-44ab-4ea0-8360-fd206e03b0bd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea75adfbd1af3891c8c83253f0cc56e3b7f0eef396bd625d683d955d9732eba6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1defc8279ef3e3bcb7b94e0ebecd9adfa114c27b4aba6b07fc9cce9933ac3eed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10cb7e7ffd92e069437d5588d6e1f2bcb3d776fe24abf845fa3554e8ef8ebacb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controlle
r\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://676ced28573a95ae4d739e490ebdfa690170c0943a22cb1cbafdc2aeb84df172\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://676ced28573a95ae4d739e490ebdfa690170c0943a22cb1cbafdc2aeb84df172\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:26Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.899083 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89fbb83e7cad0ae1f970669d2cdb9bf9535d5b8e5ccec1962faa377fe263cc17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddfc773720803f36e40054cc984d270f6ff699d660c66491adb65338a52e00e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:26Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.936655 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c58a9381-bd89-4fd9-8217-f04646879968\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cd58c1df5cc30ce98fc70b6117deafa5ebeb2d1a7d7b481b62c38ab39c9290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8b504d3d81cc7b0e4741363b8a9e4c0ea3a0c1af959c8b3def222b44a646b22b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fad541e8dbab3802d7acafd3a70a54f992d4578319dc441c41ffdffdd635d63\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:26Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.959199 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d58c71b5-5dc4-45c1-9b58-9740a35d2256\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdf114b30bdda719b3e55767f9df9f9e8e3e3e9238fa8403a388d7892922563e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a47750ed21c017ef2
cf17c7e8ce0676b8bd6a9fe2669c3d5adae247e557ea71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-98mzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:26Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.978935 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.978997 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.979011 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.979030 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.979041 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:26Z","lastTransitionTime":"2025-11-25T09:45:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.979809 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-s47tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"025219f0-bc69-4a33-acaa-b055607272bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:26Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:26Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8283067913fddd4b3d58c8dc0cc7000d482d929bc6a1d4fb501bfa8c0fa9525\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8283067913fddd4b3d58c8dc0cc7000d482d929bc6a1d4fb501bfa8c0fa9525\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:45:26Z\\\",\\\"message\\\":\\\"2025-11-25T09:44:40+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_524ab556-dd3a-4852-83f8-44456ea8689b\\\\n2025-11-25T09:44:40+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_524ab556-dd3a-4852-83f8-44456ea8689b to /host/opt/cni/bin/\\\\n2025-11-25T09:44:41Z [verbose] multus-daemon started\\\\n2025-11-25T09:44:41Z [verbose] Readiness Indicator file check\\\\n2025-11-25T09:45:26Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7rjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-s47tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:26Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:26 crc kubenswrapper[4769]: I1125 09:45:26.993510 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e477c71aa0332cf903853803df3a7cfda7c251a749b3f3d9226e0b6edbd34a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:26Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.006481 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.017472 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvkdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37fe6f3a-f80b-4d79-9825-8f1c67c64d5c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d18ad273888cf05de2af3469630aaf22e7e5313ab57628e0a68add8d24dfeabd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j6llt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a1b88c3a27560378821e084a6e12a5cdb549acdd447f54ea696a79f8914baad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev
@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j6llt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wvkdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.029936 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.044372 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-v24zk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a18d0f06-2fff-4e1a-9b11-01eaea85baa1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://181a23a91023308fd2b334e8113f5e1da7b17d63f0031f4219ebd74213e936ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8gdv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-v24zk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.058475 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-7khh9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"76770e00-0d61-45ae-9772-1e8c42dc6ea6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6phx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6phx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:52Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-7khh9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.071955 4769 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84194500-bce6-4d10-838c-a95f04acf31b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f2bc66817eb90eafa6eae43756cc4389973a5d3939e464e42a4348515e430d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ac282b65751506b0de6685f1e74ad6468022a459e40c124923fc1116c4b6c72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ac282b65751506b0de6685f1e74ad6468022a459e40c124923fc1116c4b6c72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.081487 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.081584 
4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.081600 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.081620 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.081635 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:27Z","lastTransitionTime":"2025-11-25T09:45:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.094752 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.184480 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.184531 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.184542 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.184563 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.184575 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:27Z","lastTransitionTime":"2025-11-25T09:45:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.236498 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.236564 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.236614 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:45:27 crc kubenswrapper[4769]: E1125 09:45:27.236687 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:45:27 crc kubenswrapper[4769]: E1125 09:45:27.236900 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:45:27 crc kubenswrapper[4769]: E1125 09:45:27.237005 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.287661 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.287723 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.287746 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.287768 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.287783 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:27Z","lastTransitionTime":"2025-11-25T09:45:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.391266 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.391355 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.391379 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.391412 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.391433 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:27Z","lastTransitionTime":"2025-11-25T09:45:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.494604 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.494687 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.494710 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.494748 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.494773 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:27Z","lastTransitionTime":"2025-11-25T09:45:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.598410 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.598446 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.598457 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.598476 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.598486 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:27Z","lastTransitionTime":"2025-11-25T09:45:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.701553 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.701608 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.701623 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.701645 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.701660 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:27Z","lastTransitionTime":"2025-11-25T09:45:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.768563 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-s47tv_025219f0-bc69-4a33-acaa-b055607272bb/kube-multus/0.log" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.768631 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-s47tv" event={"ID":"025219f0-bc69-4a33-acaa-b055607272bb","Type":"ContainerStarted","Data":"9552f354a6fdea64276d57f0df1ea99659a4cb438db1b42a3d9c9a97108c0c87"} Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.784238 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84194500-bce6-4d10-838c-a95f04acf31b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f2bc66817eb90eafa6eae43756cc4389973a5d3939e464e42a4348515e430d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ac282b65751506b0de6685f1e74ad6468022a459e40c124923fc1116c4b6c72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ac282b65751506b0de6685f1e74ad6468022a459e40c124923fc1116c4b6c72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],
\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.802741 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.804721 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.804757 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.804765 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.804781 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.804791 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:27Z","lastTransitionTime":"2025-11-25T09:45:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.818822 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.831254 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-v24zk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a18d0f06-2fff-4e1a-9b11-01eaea85baa1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://181a23a91023308fd2b334e8113f5e1da7b17d63f0031f4219ebd74213e936ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8gdv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-v24zk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.843242 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-7khh9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"76770e00-0d61-45ae-9772-1e8c42dc6ea6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6phx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6phx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:52Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-7khh9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.857388 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbf363f-faf3-45da-a19f-54cc0119825c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:44:25Z\\\",\\\"message\\\":\\\"W1125 09:44:15.399269 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:44:15.400911 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063855 cert, and key in /tmp/serving-cert-2102574939/serving-signer.crt, /tmp/serving-cert-2102574939/serving-signer.key\\\\nI1125 09:44:15.634430 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:44:15.637907 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:44:15.638219 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:44:15.639555 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2102574939/tls.crt::/tmp/serving-cert-2102574939/tls.key\\\\\\\"\\\\nF1125 09:44:25.945399 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.871163 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3cb8bb1-44ab-4ea0-8360-fd206e03b0bd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea75adfbd1af3891c8c83253f0cc56e3b7f0eef396bd625d683d955d9732eba6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1defc8279ef3e3bcb7b94e0ebecd9adfa114c27b4aba6b07fc9cce9933ac3eed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\
\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10cb7e7ffd92e069437d5588d6e1f2bcb3d776fe24abf845fa3554e8ef8ebacb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://676ced28573a95ae4d739e490ebdfa690170c0943a22cb1cbafdc2aeb84df172\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://676ced28573a95ae4d739e490ebdfa690170c0943a22cb1cbafdc2aeb84df172\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.886348 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89fbb83e7cad0ae1f970669d2cdb9bf9535d5b8e5ccec1962faa377fe263cc17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddfc773720803f36e40054cc984d270f6ff699d660c66491adb65338a52e00e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.899611 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2bfc42c354557e335b5586b8aae6051c137143fdd2419a04802375587244df0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.907230 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.907278 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.907301 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.907322 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.907336 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:27Z","lastTransitionTime":"2025-11-25T09:45:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.911187 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kzpxc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb3a206d-ee72-415c-bc23-2e1b2d6f8592\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f88b14a3ffb863171ab8bbb4eb89877dfd711fadb03f45a6a337540d34c0a7d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv2k5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:37Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kzpxc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.925375 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27dbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46575153-4800-4ed2-8aa3-b66b98a9c899\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ed3aea46c79a1ec2872cceee652c70923be3656e45414fecb60f293639c81c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27dbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.944807 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"069c06c6-fe60-41d0-b96d-86606f55b258\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82259eae43bbd7b738e4f7fef449b797efc4d557417eb01b61a9ff1e6d59d07b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41ce53b7a0ecd75b9205774e514ba01c6852f939cff8e96fa49105f4e600702\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0893245e50828fc44a5a9f575584f675dfd48a67e31fb78734b575e30af769ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79aa5dc74b62d0d36d744a8603484ce1a5199bba790a406ffa70f76b70592e51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7459078ef54cd1bb3171908596cdc66735a96463eafbc5a0ca21e644cd934e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab25f2ae721ae8616e5ef712256055c8f8a353d898a7f2711938634d2d9fbbe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2bedb2f61029e8dbf1a47b1b60b3321ca59a886e4794d84fa312bad3b97c2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2bedb2f61029e8dbf1a47b1b60b3321ca59a886e4794d84fa312bad3b97c2c7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:45:04Z\\\",\\\"message\\\":\\\", UUID:\\\\\\\"d937b3b3-82c3-4791-9a66-41b9fed53e9d\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-dns-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-dns-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-dns-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.174\\\\\\\", Port:9393, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1125 09:45:04.168194 6423 obj_retry.go:303] Retry object setup: *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc\\\\nI1125 09:45:04.168194 6423 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-console/networking-console-plugin-85b44fc459-gdk6g\\\\nI1125 09\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:45:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-kfvzs_openshift-ovn-kubernetes(069c06c6-fe60-41d0-b96d-86606f55b258)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://010a6e6fa299b12043210d61b4ac16d7d1f4d948888065c8a9e723060e2be2b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-kfvzs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.959882 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c58a9381-bd89-4fd9-8217-f04646879968\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cd58c1df5cc30ce98fc70b6117deafa5ebeb2d1a7d7b481b62c38ab39c9290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8b504d3d81cc7b0e4741363b8a9e4c0ea3a0c1af959c8b3def222b44a646b22b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fad541e8dbab3802d7acafd3a70a54f992d4578319dc441c41ffdffdd635d63\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.970046 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d58c71b5-5dc4-45c1-9b58-9740a35d2256\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdf114b30bdda719b3e55767f9df9f9e8e3e3e9238fa8403a388d7892922563e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a47750ed21c017ef2cf17c7e8ce0676b8bd6a9fe2669c3d5adae247e557ea71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-98mzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.982165 4769 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-s47tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"025219f0-bc69-4a33-acaa-b055607272bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9552f354a6fdea64276d57f0df1ea99659a4cb438db1b42a3d9c9a97108c0c87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8283067913fddd4b3d58c8dc0cc7000d482d929bc6a1d4fb501bfa8c0fa9525\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:45:26Z\\\",\\\"message\\\":\\\"2025-11-25T09:44:40+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_524ab556-dd3a-4852-83f8-44456ea8689b\\\\n2025-11-25T09:44:40+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_524ab556-dd3a-4852-83f8-44456ea8689b to /host/opt/cni/bin/\\\\n2025-11-25T09:44:41Z [verbose] multus-daemon started\\\\n2025-11-25T09:44:41Z [verbose] Readiness Indicator file check\\\\n2025-11-25T09:45:26Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:45:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7rjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-s47tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:27 crc kubenswrapper[4769]: I1125 09:45:27.997128 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e477c71aa0332cf903853803df3a7cfda7c251a749b3f3d9226e0b6edbd34a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:27Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.010341 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.010383 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.010393 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.010408 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.010417 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:28Z","lastTransitionTime":"2025-11-25T09:45:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.012686 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:28Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.025373 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvkdt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37fe6f3a-f80b-4d79-9825-8f1c67c64d5c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d18ad273888cf05de2af3469630aaf22e7e5313ab57628e0a68add8d24dfeabd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j6llt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a1b88c3a27560378821e084a6e12a5cdb549acdd447f54ea696a79f8914baad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j6llt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wvkdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:28Z is after 2025-08-24T17:21:41Z" Nov 25 
09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.112584 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.112615 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.112623 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.112638 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.112646 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:28Z","lastTransitionTime":"2025-11-25T09:45:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.215498 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.215551 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.215567 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.215589 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.215604 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:28Z","lastTransitionTime":"2025-11-25T09:45:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.237365 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:45:28 crc kubenswrapper[4769]: E1125 09:45:28.237546 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.318278 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.318339 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.318353 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.318376 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.318389 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:28Z","lastTransitionTime":"2025-11-25T09:45:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.421276 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.421348 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.421362 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.421381 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.421392 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:28Z","lastTransitionTime":"2025-11-25T09:45:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.524422 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.524473 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.524481 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.524500 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.524510 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:28Z","lastTransitionTime":"2025-11-25T09:45:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.628526 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.628581 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.628600 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.628620 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.628632 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:28Z","lastTransitionTime":"2025-11-25T09:45:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.732283 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.732315 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.732324 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.732339 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.732350 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:28Z","lastTransitionTime":"2025-11-25T09:45:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.836055 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.836100 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.836112 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.836130 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.836141 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:28Z","lastTransitionTime":"2025-11-25T09:45:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.939604 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.939666 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.939677 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.939694 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:28 crc kubenswrapper[4769]: I1125 09:45:28.939708 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:28Z","lastTransitionTime":"2025-11-25T09:45:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.041973 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.042015 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.042026 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.042042 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.042053 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:29Z","lastTransitionTime":"2025-11-25T09:45:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.146644 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.146717 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.146727 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.146749 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.146760 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:29Z","lastTransitionTime":"2025-11-25T09:45:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.236159 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.236224 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.236297 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:45:29 crc kubenswrapper[4769]: E1125 09:45:29.236321 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:45:29 crc kubenswrapper[4769]: E1125 09:45:29.236571 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:45:29 crc kubenswrapper[4769]: E1125 09:45:29.236759 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.249870 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.249922 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.249935 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.249956 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.249994 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:29Z","lastTransitionTime":"2025-11-25T09:45:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.355355 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.355446 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.355464 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.356105 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.356132 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:29Z","lastTransitionTime":"2025-11-25T09:45:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.363733 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.363759 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.363771 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.363786 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.363796 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:29Z","lastTransitionTime":"2025-11-25T09:45:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:29 crc kubenswrapper[4769]: E1125 09:45:29.379421 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2e5db0ee-b4dd-41e7-b399-6918209aec97\\\",\\\"systemUUID\\\":\\\"fb8d0e0c-b27f-49f2-81a7-fe75c397959e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:29Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.383894 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.383925 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.383950 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.384007 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.384025 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:29Z","lastTransitionTime":"2025-11-25T09:45:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:29 crc kubenswrapper[4769]: E1125 09:45:29.399917 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2e5db0ee-b4dd-41e7-b399-6918209aec97\\\",\\\"systemUUID\\\":\\\"fb8d0e0c-b27f-49f2-81a7-fe75c397959e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:29Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.404943 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.405014 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
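Every retry above fails for the same reason: the serving certificate of the "node.network-node-identity.openshift.io" webhook at https://127.0.0.1:9743 expired on 2025-08-24T17:21:41Z, months before the clock time in these entries. A minimal Python sketch for confirming the expiry from the node itself (assumptions: the endpoint is reachable locally and the openssl CLI is installed; the host, port, and expected notAfter come from the log line, not from kubelet):

    import ssl
    import subprocess

    HOST, PORT = "127.0.0.1", 9743  # webhook endpoint quoted in the error

    # get_server_certificate() does not verify the chain, so it can still
    # retrieve a certificate that has already expired.
    pem = ssl.get_server_certificate((HOST, PORT))

    # Print the validity window of the retrieved certificate; if the log's
    # diagnosis is right, notAfter should read Aug 24 17:21:41 2025 GMT.
    out = subprocess.run(["openssl", "x509", "-noout", "-dates"],
                         input=pem, capture_output=True, text=True, check=True)
    print(out.stdout)

Until that certificate is rotated, the kubelet's status patches will keep bouncing off the webhook no matter how many times it retries.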
event="NodeHasNoDiskPressure" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.405026 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.405053 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.405070 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:29Z","lastTransitionTime":"2025-11-25T09:45:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:29 crc kubenswrapper[4769]: E1125 09:45:29.421671 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2e5db0ee-b4dd-41e7-b399-6918209aec97\\\",\\\"systemUUID\\\":\\\"fb8d0e0c-b27f-49f2-81a7-fe75c397959e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:29Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.426291 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.426330 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
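Independently of the webhook failure, each "Node became not ready" condition carries the same root message: no CNI configuration file exists in /etc/kubernetes/cni/net.d/, so the container runtime network never becomes ready. A small sketch of the emptiness check implied by that message, under the assumption that the loader considers the conventional *.conf, *.conflist, and *.json names (the directory path is taken verbatim from the log):

    from pathlib import Path

    CNI_DIR = Path("/etc/kubernetes/cni/net.d")  # directory named in the message

    # An empty result corresponds to the "no CNI configuration file"
    # condition that keeps the Ready condition False above.
    configs = sorted(
        p.name
        for p in (CNI_DIR.iterdir() if CNI_DIR.is_dir() else [])
        if p.suffix in {".conf", ".conflist", ".json"}
    )
    print(configs if configs else "no CNI configuration files found")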
event="NodeHasNoDiskPressure" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.426372 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.426393 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.426406 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:29Z","lastTransitionTime":"2025-11-25T09:45:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:29 crc kubenswrapper[4769]: E1125 09:45:29.441370 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2e5db0ee-b4dd-41e7-b399-6918209aec97\\\",\\\"systemUUID\\\":\\\"fb8d0e0c-b27f-49f2-81a7-fe75c397959e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:29Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.445653 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.445729 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
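The err= field embeds the attempted status patch as backslash-escaped JSON, which is what makes these entries so hard to read. A sketch for recovering the structure from such a fragment; the excerpt below is a hypothetical, shortened stand-in for the real payload, and a real extraction may need one unescaping round per level of quoting in the log:

    import json

    # Shortened, hypothetical stand-in for the escaped payload inside err=.
    escaped = '{\\"status\\":{\\"conditions\\":[{\\"type\\":\\"Ready\\",\\"status\\":\\"False\\"}]}}'

    # One round of unescaping turns \" back into ", leaving plain JSON.
    patch = json.loads(escaped.replace('\\"', '"'))
    print(patch["status"]["conditions"][0])  # {'type': 'Ready', 'status': 'False'}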
event="NodeHasNoDiskPressure" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.445749 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.445774 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.445821 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:29Z","lastTransitionTime":"2025-11-25T09:45:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:29 crc kubenswrapper[4769]: E1125 09:45:29.459070 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2e5db0ee-b4dd-41e7-b399-6918209aec97\\\",\\\"systemUUID\\\":\\\"fb8d0e0c-b27f-49f2-81a7-fe75c397959e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:29Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:29 crc kubenswrapper[4769]: E1125 09:45:29.459223 4769 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.460794 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.460842 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.460852 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.460869 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.460880 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:29Z","lastTransitionTime":"2025-11-25T09:45:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.563500 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.563533 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.563541 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.563559 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.563569 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:29Z","lastTransitionTime":"2025-11-25T09:45:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.665772 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.665812 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.665830 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.665851 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.665865 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:29Z","lastTransitionTime":"2025-11-25T09:45:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.767908 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.767949 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.767978 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.767996 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.768007 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:29Z","lastTransitionTime":"2025-11-25T09:45:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.870909 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.870953 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.870991 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.871010 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.871021 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:29Z","lastTransitionTime":"2025-11-25T09:45:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.973756 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.973822 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.973836 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.973862 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:29 crc kubenswrapper[4769]: I1125 09:45:29.973877 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:29Z","lastTransitionTime":"2025-11-25T09:45:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.077216 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.077398 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.077424 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.077451 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.077470 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:30Z","lastTransitionTime":"2025-11-25T09:45:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.181151 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.181209 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.181220 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.181240 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.181252 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:30Z","lastTransitionTime":"2025-11-25T09:45:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.236308 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:45:30 crc kubenswrapper[4769]: E1125 09:45:30.236568 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.283937 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.284061 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.284076 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.284098 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.284112 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:30Z","lastTransitionTime":"2025-11-25T09:45:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.387340 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.387401 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.387420 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.387444 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.387463 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:30Z","lastTransitionTime":"2025-11-25T09:45:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.490439 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.490486 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.490499 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.490519 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.490534 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:30Z","lastTransitionTime":"2025-11-25T09:45:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.594197 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.594273 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.594297 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.594341 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.594353 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:30Z","lastTransitionTime":"2025-11-25T09:45:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.698013 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.698064 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.698076 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.698093 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.698104 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:30Z","lastTransitionTime":"2025-11-25T09:45:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.801688 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.801740 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.801750 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.801772 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.801795 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:30Z","lastTransitionTime":"2025-11-25T09:45:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.905268 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.905331 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.905344 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.905367 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:30 crc kubenswrapper[4769]: I1125 09:45:30.905383 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:30Z","lastTransitionTime":"2025-11-25T09:45:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.008587 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.008653 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.008666 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.008686 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.008698 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:31Z","lastTransitionTime":"2025-11-25T09:45:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.113227 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.113278 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.113288 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.113303 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.113312 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:31Z","lastTransitionTime":"2025-11-25T09:45:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.216127 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.216173 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.216197 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.216214 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.216224 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:31Z","lastTransitionTime":"2025-11-25T09:45:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.236588 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:45:31 crc kubenswrapper[4769]: E1125 09:45:31.236734 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.236797 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.236881 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:45:31 crc kubenswrapper[4769]: E1125 09:45:31.237098 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 09:45:31 crc kubenswrapper[4769]: E1125 09:45:31.237241 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.319380 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.319701 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.319889 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.320016 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.320107 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:31Z","lastTransitionTime":"2025-11-25T09:45:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.422804 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.422853 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.422878 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.422898 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.422909 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:31Z","lastTransitionTime":"2025-11-25T09:45:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.526357 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.526411 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.526423 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.526451 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.526466 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:31Z","lastTransitionTime":"2025-11-25T09:45:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.629781 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.629851 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.629875 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.629907 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.629929 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:31Z","lastTransitionTime":"2025-11-25T09:45:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.732922 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.733001 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.733016 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.733039 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.733054 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:31Z","lastTransitionTime":"2025-11-25T09:45:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.835273 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.835341 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.835354 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.835371 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.835383 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:31Z","lastTransitionTime":"2025-11-25T09:45:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.938328 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.938733 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.938744 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.938761 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:31 crc kubenswrapper[4769]: I1125 09:45:31.938775 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:31Z","lastTransitionTime":"2025-11-25T09:45:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.041152 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.041200 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.041211 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.041231 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.041241 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:32Z","lastTransitionTime":"2025-11-25T09:45:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.144402 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.144450 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.144460 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.144478 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.144489 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:32Z","lastTransitionTime":"2025-11-25T09:45:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.236786 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:45:32 crc kubenswrapper[4769]: E1125 09:45:32.238109 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.239119 4769 scope.go:117] "RemoveContainer" containerID="a2bedb2f61029e8dbf1a47b1b60b3321ca59a886e4794d84fa312bad3b97c2c7" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.251469 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.251511 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.251522 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.251541 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.251552 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:32Z","lastTransitionTime":"2025-11-25T09:45:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.269112 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c58a9381-bd89-4fd9-8217-f04646879968\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cd58c1df5cc30ce98fc70b6117deafa5ebeb2d1a7d7b481b62c38ab39c9290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8b504d3d81cc7b0e4741363b8a9e4c0ea3a0c1af959c8b3def222b44a646b22b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fad541e8dbab3802d7acafd3a70a54f992d4578319dc441c41ffdffdd635d63\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:32Z is after 2025-08-24T17:21:41Z"
Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.285588 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d58c71b5-5dc4-45c1-9b58-9740a35d2256\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdf114b30bdda719b3e55767f9df9f9e8e3e3e9238fa8403a388d7892922563e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a47750ed21c017ef2cf17c7e8ce0676b8bd6a9fe2669c3d5adae247e557ea71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-98mzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.303248 4769 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-s47tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"025219f0-bc69-4a33-acaa-b055607272bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9552f354a6fdea64276d57f0df1ea99659a4cb438db1b42a3d9c9a97108c0c87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8283067913fddd4b3d58c8dc0cc7000d482d929bc6a1d4fb501bfa8c0fa9525\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:45:26Z\\\",\\\"message\\\":\\\"2025-11-25T09:44:40+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_524ab556-dd3a-4852-83f8-44456ea8689b\\\\n2025-11-25T09:44:40+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_524ab556-dd3a-4852-83f8-44456ea8689b to /host/opt/cni/bin/\\\\n2025-11-25T09:44:41Z [verbose] multus-daemon started\\\\n2025-11-25T09:44:41Z [verbose] Readiness Indicator file check\\\\n2025-11-25T09:45:26Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:45:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7rjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-s47tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.337057 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e477c71aa0332cf903853803df3a7cfda7c251a749b3f3d9226e0b6edbd34a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.354608 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.355548 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.355613 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.355624 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.355645 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.355709 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:32Z","lastTransitionTime":"2025-11-25T09:45:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.367824 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvkdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37fe6f3a-f80b-4d79-9825-8f1c67c64d5c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d18ad273888cf05de2af3469630aaf22e7e5313ab57628e0a68add8d24dfeabd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j6llt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a1b88c3a27560378821e084a6e12a5cdb549acdd447f54ea696a79f8914baad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j6llt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wvkdt\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.382308 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-v24zk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a18d0f06-2fff-4e1a-9b11-01eaea85baa1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://181a23a91023308fd2b334e8113f5e1da7b17d63f0031f4219ebd74213e936ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8gdv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-v24zk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.396697 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-7khh9" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"76770e00-0d61-45ae-9772-1e8c42dc6ea6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6phx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6phx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:52Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-7khh9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.410408 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"84194500-bce6-4d10-838c-a95f04acf31b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f2bc66817eb90eafa6eae43756cc4389973a5d3939e464e42a4348515e430d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ac282b65751506b0de6685f1e74ad6468022a459e40c124923fc1116c4b6c72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ac282b65751506b0de6685f1e74ad6468022a459e40c124923fc1116c4b6c72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.427393 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.446876 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.458481 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.458528 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.458541 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.458562 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.458575 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:32Z","lastTransitionTime":"2025-11-25T09:45:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.463667 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kzpxc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb3a206d-ee72-415c-bc23-2e1b2d6f8592\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f88b14a3ffb863171ab8bbb4eb89877dfd711fadb03f45a6a337540d34c0a7d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv2k5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:37Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kzpxc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.481912 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27dbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46575153-4800-4ed2-8aa3-b66b98a9c899\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ed3aea46c79a1ec2872cceee652c70923be3656e45414fecb60f293639c81c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27dbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.507095 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"069c06c6-fe60-41d0-b96d-86606f55b258\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82259eae43bbd7b738e4f7fef449b797efc4d557417eb01b61a9ff1e6d59d07b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41ce53b7a0ecd75b9205774e514ba01c6852f939cff8e96fa49105f4e600702\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0893245e50828fc44a5a9f575584f675dfd48a67e31fb78734b575e30af769ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79aa5dc74b62d0d36d744a8603484ce1a5199bba790a406ffa70f76b70592e51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7459078ef54cd1bb3171908596cdc66735a96463eafbc5a0ca21e644cd934e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab25f2ae721ae8616e5ef712256055c8f8a353d898a7f2711938634d2d9fbbe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2bedb2f61029e8dbf1a47b1b60b3321ca59a886e4794d84fa312bad3b97c2c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2bedb2f61029e8dbf1a47b1b60b3321ca59a886e4794d84fa312bad3b97c2c7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:45:04Z\\\",\\\"message\\\":\\\", UUID:\\\\\\\"d937b3b3-82c3-4791-9a66-41b9fed53e9d\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-dns-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-dns-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-dns-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.174\\\\\\\", Port:9393, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1125 09:45:04.168194 6423 obj_retry.go:303] Retry object setup: *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc\\\\nI1125 09:45:04.168194 6423 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-console/networking-console-plugin-85b44fc459-gdk6g\\\\nI1125 09\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:45:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-kfvzs_openshift-ovn-kubernetes(069c06c6-fe60-41d0-b96d-86606f55b258)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://010a6e6fa299b12043210d61b4ac16d7d1f4d948888065c8a9e723060e2be2b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-kfvzs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.522140 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbf363f-faf3-45da-a19f-54cc0119825c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24\\\",\\\"i
mage\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:44:25Z\\\",\\\"message\\\":\\\"W1125 09:44:15.399269 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:44:15.400911 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063855 cert, and key in /tmp/serving-cert-2102574939/serving-signer.crt, /tmp/serving-cert-2102574939/serving-signer.key\\\\nI1125 09:44:15.634430 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:44:15.637907 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:44:15.638219 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:44:15.639555 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2102574939/tls.crt::/tmp/serving-cert-2102574939/tls.key\\\\\\\"\\\\nF1125 09:44:25.945399 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.536456 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3cb8bb1-44ab-4ea0-8360-fd206e03b0bd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea75adfbd1af3891c8c83253f0cc56e3b7f0eef396bd625d683d955d9732eba6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1defc8279ef3e3bcb7b94e0ebecd9adfa114c27b4aba6b07fc9cce9933ac3eed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10cb7e7ffd92e069437d5588d6e1f2bcb3d776fe24abf845fa3554e8ef8ebacb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://676ced28573a95ae4d739e490ebdfa690170c0943a22cb1cbafdc2aeb84df172\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://676ced28573a95ae4d739e490ebdfa690170c0943a22cb1cbafdc2aeb84df172\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.551603 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89fbb83e7cad0ae1f970669d2cdb9bf9535d5b8e5ccec1962faa377fe263cc17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddfc773720803f36e40054cc984d270f6ff699d660c66491adb65338a52e00e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.563688 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.563754 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.563768 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.563842 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.563859 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:32Z","lastTransitionTime":"2025-11-25T09:45:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.565896 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2bfc42c354557e335b5586b8aae6051c137143fdd2419a04802375587244df0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.667749 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.667807 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.667821 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.667846 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.667862 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:32Z","lastTransitionTime":"2025-11-25T09:45:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.770109 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.770150 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.770160 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.770175 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.770186 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:32Z","lastTransitionTime":"2025-11-25T09:45:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.788178 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-kfvzs_069c06c6-fe60-41d0-b96d-86606f55b258/ovnkube-controller/2.log" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.791487 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" event={"ID":"069c06c6-fe60-41d0-b96d-86606f55b258","Type":"ContainerStarted","Data":"c5d5017da58d670189a8129172ef3a6d333e47c6ef6cf78ca9f1392dec54530f"} Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.792027 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.816664 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27dbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46575153-4800-4ed2-8aa3-b66b98a9c899\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ed3aea46c79a1ec2872cceee652c70923be3656e45414fecb60f293639c81c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27dbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.839343 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"069c06c6-fe60-41d0-b96d-86606f55b258\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82259eae43bbd7b738e4f7fef449b797efc4d557417eb01b61a9ff1e6d59d07b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41ce53b7a0ecd75b9205774e514ba01c6852f939cff8e96fa49105f4e600702\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0893245e50828fc44a5a9f575584f675dfd48a67e31fb78734b575e30af769ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79aa5dc74b62d0d36d744a8603484ce1a5199bba790a406ffa70f76b70592e51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7459078ef54cd1bb3171908596cdc66735a96463eafbc5a0ca21e644cd934e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab25f2ae721ae8616e5ef712256055c8f8a353d898a7f2711938634d2d9fbbe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5d5017da58d670189a8129172ef3a6d333e47c6ef6cf78ca9f1392dec54530f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2bedb2f61029e8dbf1a47b1b60b3321ca59a886e4794d84fa312bad3b97c2c7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:45:04Z\\\",\\\"message\\\":\\\", UUID:\\\\\\\"d937b3b3-82c3-4791-9a66-41b9fed53e9d\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-dns-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-dns-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-dns-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.174\\\\\\\", Port:9393, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1125 09:45:04.168194 6423 obj_retry.go:303] Retry object setup: *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc\\\\nI1125 09:45:04.168194 6423 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-console/networking-console-plugin-85b44fc459-gdk6g\\\\nI1125 
09\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:45:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://010a6e6fa299b12043210d61b4ac16d7d1f4d948888065c8a9e723060e2be2b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-kfvzs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.853468 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbf363f-faf3-45da-a19f-54cc0119825c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:44:25Z\\\",\\\"message\\\":\\\"W1125 09:44:15.399269 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:44:15.400911 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063855 cert, and key in /tmp/serving-cert-2102574939/serving-signer.crt, /tmp/serving-cert-2102574939/serving-signer.key\\\\nI1125 09:44:15.634430 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:44:15.637907 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:44:15.638219 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:44:15.639555 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2102574939/tls.crt::/tmp/serving-cert-2102574939/tls.key\\\\\\\"\\\\nF1125 09:44:25.945399 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.867121 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3cb8bb1-44ab-4ea0-8360-fd206e03b0bd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea75adfbd1af3891c8c83253f0cc56e3b7f0eef396bd625d683d955d9732eba6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1defc8279ef3e3bcb7b94e0ebecd9adfa114c27b4aba6b07fc9cce9933ac3eed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\
\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10cb7e7ffd92e069437d5588d6e1f2bcb3d776fe24abf845fa3554e8ef8ebacb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://676ced28573a95ae4d739e490ebdfa690170c0943a22cb1cbafdc2aeb84df172\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://676ced28573a95ae4d739e490ebdfa690170c0943a22cb1cbafdc2aeb84df172\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.872424 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.872460 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.872471 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.872491 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.872506 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:32Z","lastTransitionTime":"2025-11-25T09:45:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.881644 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89fbb83e7cad0ae1f970669d2cdb9bf9535d5b8e5ccec1962faa377fe263cc17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddfc773720803f36e40054cc984d270f6ff699d660c66491adb65338a52e00e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.895982 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2bfc42c354557e335b5586b8aae6051c137143fdd2419a04802375587244df0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.909584 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kzpxc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb3a206d-ee72-415c-bc23-2e1b2d6f8592\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f88b14a3ffb863171ab8bbb4eb89877dfd711fadb03f45a6a337540d34c0a7d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv2k5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:37Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kzpxc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.924423 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c58a9381-bd89-4fd9-8217-f04646879968\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cd58c1df5cc30ce98fc70b6117deafa5ebeb2d1a7d7b481b62c38ab39c9290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8b504d3d81cc7b0e4741363b8a9e4c0ea3a0c1af959c8b3def222b44a646b22b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fad541e8dbab3802d7acafd3a70a54f992d4578319dc441c41ffdffdd635d63\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.940283 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d58c71b5-5dc4-45c1-9b58-9740a35d2256\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdf114b30bdda719b3e55767f9df9f9e8e3e3e9238fa8403a388d7892922563e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a47750ed21c017ef2
cf17c7e8ce0676b8bd6a9fe2669c3d5adae247e557ea71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-98mzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.957937 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-s47tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"025219f0-bc69-4a33-acaa-b055607272bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9552f354a6fdea64276d57f0df1ea99659a4cb438db1b42a3d9c9a97108c0c87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8283067913fddd4b3d58c8dc0cc7000d482d929bc6a1d4fb501bfa8c0fa9525\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:45:26Z\\\",\\\"message\\\":\\\"2025-11-25T09:44:40+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_524ab556-dd3a-4852-83f8-44456ea8689b\\\\n2025-11-25T09:44:40+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_524ab556-dd3a-4852-83f8-44456ea8689b to 
/host/opt/cni/bin/\\\\n2025-11-25T09:44:41Z [verbose] multus-daemon started\\\\n2025-11-25T09:44:41Z [verbose] Readiness Indicator file check\\\\n2025-11-25T09:45:26Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:45:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7rjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-s47tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.975822 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.975880 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.975894 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.975912 4769 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeNotReady" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.975926 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:32Z","lastTransitionTime":"2025-11-25T09:45:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.977222 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e477c71aa0332cf903853803df3a7cfda7c251a749b3f3d9226e0b6edbd34a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:32 crc kubenswrapper[4769]: I1125 09:45:32.992208 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:32Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.008155 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvkdt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37fe6f3a-f80b-4d79-9825-8f1c67c64d5c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d18ad273888cf05de2af3469630aaf22e7e5313ab57628e0a68add8d24dfeabd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j6llt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a1b88c3a27560378821e084a6e12a5cdb549acdd447f54ea696a79f8914baad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j6llt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wvkdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:33Z is after 2025-08-24T17:21:41Z" Nov 25 
09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.020850 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-7khh9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"76770e00-0d61-45ae-9772-1e8c42dc6ea6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6phx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6phx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:52Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-7khh9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:33Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.032179 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"84194500-bce6-4d10-838c-a95f04acf31b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f2bc66817eb90eafa6eae43756cc4389973a5d3939e464e42a4348515e430d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ac282b65751506b0de6685f1e74ad6468022a459e40c124923fc1116c4b6c72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ac282b65751506b0de6685f1e74ad6468022a459e40c124923fc1116c4b6c72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:33Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.045414 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:33Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.062062 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:33Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.073029 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-v24zk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a18d0f06-2fff-4e1a-9b11-01eaea85baa1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://181a23a91023308fd2b334e8113f5e1da7b17d63f0031f4219ebd74213e936ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8gdv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126
.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-v24zk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:33Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.079184 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.079246 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.079260 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.079288 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.079310 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:33Z","lastTransitionTime":"2025-11-25T09:45:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.182738 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.182793 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.182832 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.182850 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.182860 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:33Z","lastTransitionTime":"2025-11-25T09:45:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.236611 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.236660 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:45:33 crc kubenswrapper[4769]: E1125 09:45:33.236811 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.236632 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:45:33 crc kubenswrapper[4769]: E1125 09:45:33.236981 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:45:33 crc kubenswrapper[4769]: E1125 09:45:33.237078 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.286134 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.286214 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.286237 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.286267 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.286291 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:33Z","lastTransitionTime":"2025-11-25T09:45:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.389892 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.389952 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.389997 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.390021 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.390039 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:33Z","lastTransitionTime":"2025-11-25T09:45:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.492758 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.492812 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.492824 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.492843 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.492855 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:33Z","lastTransitionTime":"2025-11-25T09:45:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.595665 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.595707 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.595716 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.595735 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.595749 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:33Z","lastTransitionTime":"2025-11-25T09:45:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.699000 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.699046 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.699056 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.699073 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.699084 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:33Z","lastTransitionTime":"2025-11-25T09:45:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.797183 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-kfvzs_069c06c6-fe60-41d0-b96d-86606f55b258/ovnkube-controller/3.log" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.797826 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-kfvzs_069c06c6-fe60-41d0-b96d-86606f55b258/ovnkube-controller/2.log" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.801728 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" event={"ID":"069c06c6-fe60-41d0-b96d-86606f55b258","Type":"ContainerDied","Data":"c5d5017da58d670189a8129172ef3a6d333e47c6ef6cf78ca9f1392dec54530f"} Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.801777 4769 generic.go:334] "Generic (PLEG): container finished" podID="069c06c6-fe60-41d0-b96d-86606f55b258" containerID="c5d5017da58d670189a8129172ef3a6d333e47c6ef6cf78ca9f1392dec54530f" exitCode=1 Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.801796 4769 scope.go:117] "RemoveContainer" containerID="a2bedb2f61029e8dbf1a47b1b60b3321ca59a886e4794d84fa312bad3b97c2c7" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.802234 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.802291 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.802305 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.802321 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.802485 4769 scope.go:117] "RemoveContainer" containerID="c5d5017da58d670189a8129172ef3a6d333e47c6ef6cf78ca9f1392dec54530f" Nov 25 09:45:33 crc kubenswrapper[4769]: E1125 09:45:33.802665 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed 
container=ovnkube-controller pod=ovnkube-node-kfvzs_openshift-ovn-kubernetes(069c06c6-fe60-41d0-b96d-86606f55b258)\"" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.802824 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:33Z","lastTransitionTime":"2025-11-25T09:45:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.827499 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"069c06c6-fe60-41d0-b96d-86606f55b258\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82259eae43bbd7b738e4f7fef449b797efc4d557417eb01b61a9ff1e6d59d07b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41ce53b7a0ecd75b9205774e514ba01c6852f939cff8e96fa49105f4e600702\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0893245e50828fc44a5a9f575584f675dfd48a67e31fb78734b575e30af769ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79aa5dc74b62d0d36d744a8603484ce1a5199bba790a406ffa70f76b70592e51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7459078ef54cd1bb3171908596cdc66735a96463eafbc5a0ca21e644cd934e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab25f2ae721ae8616e5ef712256055c8f8a353d898a7f2711938634d2d9fbbe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5d5017da58d670189a8129172ef3a6d333e47c6
ef6cf78ca9f1392dec54530f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a2bedb2f61029e8dbf1a47b1b60b3321ca59a886e4794d84fa312bad3b97c2c7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:45:04Z\\\",\\\"message\\\":\\\", UUID:\\\\\\\"d937b3b3-82c3-4791-9a66-41b9fed53e9d\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-dns-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-dns-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-dns-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.174\\\\\\\", Port:9393, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1125 09:45:04.168194 6423 obj_retry.go:303] Retry object setup: *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc\\\\nI1125 09:45:04.168194 6423 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-console/networking-console-plugin-85b44fc459-gdk6g\\\\nI1125 09\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:45:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5d5017da58d670189a8129172ef3a6d333e47c6ef6cf78ca9f1392dec54530f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:45:33Z\\\",\\\"message\\\":\\\"nts]} name:Service_openshift-apiserver/check-endpoints_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.139:17698:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {8efa4d1a-72f5-4dfa-9bc2-9d93ef11ecf2}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1125 09:45:33.166612 6820 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed 
to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:33Z is after 2025-08-24T17:21:41Z]\\\\nI1125 09:45:33.166629 6820 obj_retry.go:303] Retry objec\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:45:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://010a6e6fa299b12043210d61b4ac16d7d1f4d948888065c8a9e723060e2be2b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"
/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-kfvzs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:33Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.845856 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbf363f-faf3-45da-a19f-54cc0119825c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:44:25Z\\\",\\\"message\\\":\\\"W1125 09:44:15.399269 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:44:15.400911 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063855 cert, and key in /tmp/serving-cert-2102574939/serving-signer.crt, /tmp/serving-cert-2102574939/serving-signer.key\\\\nI1125 09:44:15.634430 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:44:15.637907 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:44:15.638219 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:44:15.639555 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2102574939/tls.crt::/tmp/serving-cert-2102574939/tls.key\\\\\\\"\\\\nF1125 09:44:25.945399 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:33Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.859215 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3cb8bb1-44ab-4ea0-8360-fd206e03b0bd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea75adfbd1af3891c8c83253f0cc56e3b7f0eef396bd625d683d955d9732eba6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1defc8279ef3e3bcb7b94e0ebecd9adfa114c27b4aba6b07fc9cce9933ac3eed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\
\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10cb7e7ffd92e069437d5588d6e1f2bcb3d776fe24abf845fa3554e8ef8ebacb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://676ced28573a95ae4d739e490ebdfa690170c0943a22cb1cbafdc2aeb84df172\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://676ced28573a95ae4d739e490ebdfa690170c0943a22cb1cbafdc2aeb84df172\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:33Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.877077 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89fbb83e7cad0ae1f970669d2cdb9bf9535d5b8e5ccec1962faa377fe263cc17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddfc773720803f36e40054cc984d270f6ff699d660c66491adb65338a52e00e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:33Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.890034 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2bfc42c354557e335b5586b8aae6051c137143fdd2419a04802375587244df0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:33Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.902730 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kzpxc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb3a206d-ee72-415c-bc23-2e1b2d6f8592\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f88b14a3ffb863171ab8bbb4eb89877dfd711fadb03f45a6a337540d34c0a7d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv2k5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:37Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kzpxc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:33Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.908473 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.908517 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.908532 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.908554 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.908572 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:33Z","lastTransitionTime":"2025-11-25T09:45:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.921348 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27dbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46575153-4800-4ed2-8aa3-b66b98a9c899\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ed3aea46c79a1ec2872cceee652c70923be3656e45414fecb60f293639c81c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://42278a63
f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/e
ntrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27dbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:33Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.944235 4769 
status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c58a9381-bd89-4fd9-8217-f04646879968\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cd58c1df5cc30ce98fc70b6117deafa5ebeb2d1a7d7b481b62c38ab39c9290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8b504d3d81cc7b0e4741363b8a9e4c0ea3a0c1af959c8b3def222b44a646b22b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"c
ri-o://0fad541e8dbab3802d7acafd3a70a54f992d4578319dc441c41ffdffdd635d63\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:33Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.959542 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d58c71b5-5dc4-45c1-9b58-9740a35d2256\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdf114b30bdda719b3e55767f9df9f9e8e3e3e9238fa8403a388d7892922563e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\
\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a47750ed21c017ef2cf17c7e8ce0676b8bd6a9fe2669c3d5adae247e557ea71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-98mzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:33Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.980497 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-s47tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"025219f0-bc69-4a33-acaa-b055607272bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9552f354a6fdea64276d57f0df1ea99659a4cb438db1b42a3d9c9a97108c0c87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8283067913fddd4b3d58c8dc0cc7000d482d929bc6a1d4fb501bfa8c0fa9525\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:45:26Z\\\",\\\"message\\\":\\\"2025-11-25T09:44:40+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to 
/host/opt/cni/bin/upgrade_524ab556-dd3a-4852-83f8-44456ea8689b\\\\n2025-11-25T09:44:40+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_524ab556-dd3a-4852-83f8-44456ea8689b to /host/opt/cni/bin/\\\\n2025-11-25T09:44:41Z [verbose] multus-daemon started\\\\n2025-11-25T09:44:41Z [verbose] Readiness Indicator file check\\\\n2025-11-25T09:45:26Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:45:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7rjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-s47tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:33Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:33 crc kubenswrapper[4769]: I1125 09:45:33.996704 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e477c71aa0332cf903853803df3a7cfda7c251a749b3f3d9226e0b6edbd34a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:33Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.011784 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.011821 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.011834 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.011853 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.011866 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:34Z","lastTransitionTime":"2025-11-25T09:45:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.018454 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:34Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.034673 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvkdt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37fe6f3a-f80b-4d79-9825-8f1c67c64d5c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d18ad273888cf05de2af3469630aaf22e7e5313ab57628e0a68add8d24dfeabd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j6llt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a1b88c3a27560378821e084a6e12a5cdb549acdd447f54ea696a79f8914baad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j6llt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wvkdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:34Z is after 2025-08-24T17:21:41Z" Nov 25 
09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.049944 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84194500-bce6-4d10-838c-a95f04acf31b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f2bc66817eb90eafa6eae43756cc4389973a5d3939e464e42a4348515e430d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ac282b65751506b0de6685f1e74ad6468022a459e40c124923fc1116c4b6c72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ac282b65751506b0de6685f1e74ad6468022a459e40c124923fc1116c4b6c72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:34Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.064439 4769 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:34Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.080502 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:34Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.092729 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-v24zk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a18d0f06-2fff-4e1a-9b11-01eaea85baa1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://181a23a91023308fd2b334e8113f5e1da7b17d63f0031f4219ebd74213e936ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8gdv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-v24zk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:34Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.107594 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-7khh9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"76770e00-0d61-45ae-9772-1e8c42dc6ea6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6phx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6phx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:52Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-7khh9\":
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:34Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.114538 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.114590 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.114601 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.114616 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.114627 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:34Z","lastTransitionTime":"2025-11-25T09:45:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.218016 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.218085 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.218110 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.218143 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.218165 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:34Z","lastTransitionTime":"2025-11-25T09:45:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.236824 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:45:34 crc kubenswrapper[4769]: E1125 09:45:34.237099 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.321708 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.321780 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.321795 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.321819 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.321833 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:34Z","lastTransitionTime":"2025-11-25T09:45:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.425568 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.425623 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.425634 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.425649 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.425662 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:34Z","lastTransitionTime":"2025-11-25T09:45:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.528715 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.528779 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.528798 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.528826 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.528848 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:34Z","lastTransitionTime":"2025-11-25T09:45:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.631940 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.632015 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.632033 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.632060 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.632074 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:34Z","lastTransitionTime":"2025-11-25T09:45:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.735220 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.735320 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.735334 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.735353 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.735365 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:34Z","lastTransitionTime":"2025-11-25T09:45:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.807601 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-kfvzs_069c06c6-fe60-41d0-b96d-86606f55b258/ovnkube-controller/3.log" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.812322 4769 scope.go:117] "RemoveContainer" containerID="c5d5017da58d670189a8129172ef3a6d333e47c6ef6cf78ca9f1392dec54530f" Nov 25 09:45:34 crc kubenswrapper[4769]: E1125 09:45:34.812567 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-kfvzs_openshift-ovn-kubernetes(069c06c6-fe60-41d0-b96d-86606f55b258)\"" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.825256 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvkdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37fe6f3a-f80b-4d79-9825-8f1c67c64d5c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d18ad273888cf05de2af3469630aaf22e7e5313ab57628e0a68add8d24dfeabd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j6llt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a1b88c3a27560378821e084a6e12a5cdb549acdd447f54ea696a79f8914baad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j6llt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wvkdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:34Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.838210 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.838245 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.838254 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.838269 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.838279 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:34Z","lastTransitionTime":"2025-11-25T09:45:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.839728 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e477c71aa0332cf903853803df3a7cfda7c251a749b3f3d9226e0b6edbd34a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:34Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.850801 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:34Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.861877 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:34Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.873621 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:34Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.883366 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-v24zk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a18d0f06-2fff-4e1a-9b11-01eaea85baa1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://181a23a91023308fd2b334e8113f5e1da7b17d63f0031f4219ebd74213e936ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8gdv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-v24zk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:34Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.893413 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-7khh9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"76770e00-0d61-45ae-9772-1e8c42dc6ea6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6phx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6phx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:52Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-7khh9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:34Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.903766 4769 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84194500-bce6-4d10-838c-a95f04acf31b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f2bc66817eb90eafa6eae43756cc4389973a5d3939e464e42a4348515e430d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ac282b65751506b0de6685f1e74ad6468022a459e40c124923fc1116c4b6c72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ac282b65751506b0de6685f1e74ad6468022a459e40c124923fc1116c4b6c72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:34Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.915367 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89fbb83e7cad0ae1f970669d2cdb9bf9535d5b8e5ccec1962faa377fe263cc17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddfc773720803f36e40054cc984d270f6ff699d660c66491adb65338a52e00e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:34Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.926810 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2bfc42c354557e335b5586b8aae6051c137143fdd2419a04802375587244df0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:34Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.937870 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kzpxc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb3a206d-ee72-415c-bc23-2e1b2d6f8592\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f88b14a3ffb863171ab8bbb4eb89877dfd711fadb03f45a6a337540d34c0a7d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv2k5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:37Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kzpxc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:34Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.940685 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.940733 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.940757 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.940781 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.940796 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:34Z","lastTransitionTime":"2025-11-25T09:45:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.952782 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27dbp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46575153-4800-4ed2-8aa3-b66b98a9c899\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ed3aea46c79a1ec2872cceee652c70923be3656e45414fecb60f293639c81c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://42278a63
f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/e
ntrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27dbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:34Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.972626 4769 
status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"069c06c6-fe60-41d0-b96d-86606f55b258\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82259eae43bbd7b738e4f7fef449b797efc4d557417eb01b61a9ff1e6d59d07b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41ce53b7a0ecd75b9205774e514ba01c6852f939cff8e96fa49105f4e600702\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0893245e50828fc44a5a9f575584f675dfd48a67e31fb78734b575e30af769ae\\\",\\\"image\\\":\\\"quay
.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79aa5dc74b62d0d36d744a8603484ce1a5199bba790a406ffa70f76b70592e51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7459078ef54cd1bb3171908596cdc66735a96463eafbc5a0ca21e644cd934e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab25f2ae721ae8616e5ef712256055c8f8a353d898a7f2711938634d2d9fbbe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32
fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5d5017da58d670189a8129172ef3a6d333e47c6ef6cf78ca9f1392dec54530f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5d5017da58d670189a8129172ef3a6d333e47c6ef6cf78ca9f1392dec54530f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:45:33Z\\\",\\\"message\\\":\\\"nts]} name:Service_openshift-apiserver/check-endpoints_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.139:17698:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {8efa4d1a-72f5-4dfa-9bc2-9d93ef11ecf2}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1125 09:45:33.166612 6820 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:33Z is after 2025-08-24T17:21:41Z]\\\\nI1125 09:45:33.166629 6820 obj_retry.go:303] Retry objec\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:45:32Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-kfvzs_openshift-ovn-kubernetes(069c06c6-fe60-41d0-b96d-86606f55b258)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://010a6e6fa299b12043210d61b4ac16d7d1f4d948888065c8a9e723060e2be2b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-kfvzs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:34Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.986393 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbf363f-faf3-45da-a19f-54cc0119825c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24\\\",\\\"i
mage\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:44:25Z\\\",\\\"message\\\":\\\"W1125 09:44:15.399269 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:44:15.400911 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063855 cert, and key in /tmp/serving-cert-2102574939/serving-signer.crt, /tmp/serving-cert-2102574939/serving-signer.key\\\\nI1125 09:44:15.634430 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:44:15.637907 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:44:15.638219 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:44:15.639555 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2102574939/tls.crt::/tmp/serving-cert-2102574939/tls.key\\\\\\\"\\\\nF1125 09:44:25.945399 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:34Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.986663 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:45:34 crc kubenswrapper[4769]: E1125 09:45:34.986844 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:45:34 crc kubenswrapper[4769]: E1125 09:45:34.986869 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object 
"openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:45:34 crc kubenswrapper[4769]: E1125 09:45:34.986883 4769 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:45:34 crc kubenswrapper[4769]: E1125 09:45:34.986939 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 09:46:38.986925122 +0000 UTC m=+147.571897435 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:45:34 crc kubenswrapper[4769]: I1125 09:45:34.998071 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3cb8bb1-44ab-4ea0-8360-fd206e03b0bd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea75adfbd1af3891c8c83253f0cc56e3b7f0eef396bd625d683d955d9732eba6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1defc8279ef3e3bcb7b94e0ebecd9adfa114c27b4aba6b07fc9cce9933ac3eed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10cb7e7ffd92e069437d5588d6e1f2bcb3d776fe24abf845fa3554e8ef8ebacb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://676ced28573a95ae4d739e490ebdfa690170c0943a22cb1cbafdc2aeb84df172\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://676ced28573a95ae4d739e490ebdfa690170c0943a22cb1cbafdc2aeb84df172\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:34Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.010923 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-s47tv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"025219f0-bc69-4a33-acaa-b055607272bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9552f354a6fdea64276d57f0df1ea99659a4cb438db1b42a3d9c9a97108c0c87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8283067913fddd4b3d58c8dc0cc7000d482d929bc6a1d4fb501bfa8c0fa9525\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:45:26Z\\\",\\\"message\\\":\\\"2025-11-25T09:44:40+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_524ab556-dd3a-4852-83f8-44456ea8689b\\\\n2025-11-25T09:44:40+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_524ab556-dd3a-4852-83f8-44456ea8689b to /host/opt/cni/bin/\\\\n2025-11-25T09:44:41Z [verbose] multus-daemon started\\\\n2025-11-25T09:44:41Z [verbose] Readiness Indicator file check\\\\n2025-11-25T09:45:26Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:45:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7rjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-s47tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.024315 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c58a9381-bd89-4fd9-8217-f04646879968\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cd58c1df5cc30ce98fc70b6117deafa5ebeb2d1a7d7b481b62c38ab39c9290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8b504d3d81cc7b0e4741363b8a9e4c0ea3a0c1af959c8b3def222b44a646b22b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fad541e8dbab3802d7acafd3a70a54f992d4578319dc441c41ffdffdd635d63\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.036930 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d58c71b5-5dc4-45c1-9b58-9740a35d2256\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdf114b30bdda719b3e55767f9df9f9e8e3e3e9238fa8403a388d7892922563e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a47750ed21c017ef2
cf17c7e8ce0676b8bd6a9fe2669c3d5adae247e557ea71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-98mzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:35Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.048487 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.048527 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.048537 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.048552 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.048564 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:35Z","lastTransitionTime":"2025-11-25T09:45:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.087498 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.087681 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.087740 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:45:35 crc kubenswrapper[4769]: E1125 09:45:35.087815 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:39.08773821 +0000 UTC m=+147.672710593 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.087915 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:45:35 crc kubenswrapper[4769]: E1125 09:45:35.087945 4769 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:45:35 crc kubenswrapper[4769]: E1125 09:45:35.087824 4769 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:45:35 crc kubenswrapper[4769]: E1125 09:45:35.088093 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:46:39.08806879 +0000 UTC m=+147.673041103 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 09:45:35 crc kubenswrapper[4769]: E1125 09:45:35.088145 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 09:46:39.088114601 +0000 UTC m=+147.673086914 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 09:45:35 crc kubenswrapper[4769]: E1125 09:45:35.088233 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 09:45:35 crc kubenswrapper[4769]: E1125 09:45:35.088254 4769 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 09:45:35 crc kubenswrapper[4769]: E1125 09:45:35.088269 4769 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:45:35 crc kubenswrapper[4769]: E1125 09:45:35.088316 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 09:46:39.088303666 +0000 UTC m=+147.673276059 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.152248 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.152301 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.152309 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.152326 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.152337 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:35Z","lastTransitionTime":"2025-11-25T09:45:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.236470 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.236470 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.236470 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:45:35 crc kubenswrapper[4769]: E1125 09:45:35.236748 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:45:35 crc kubenswrapper[4769]: E1125 09:45:35.236808 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:45:35 crc kubenswrapper[4769]: E1125 09:45:35.236622 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.254862 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.254909 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.254922 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.254942 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.254957 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:35Z","lastTransitionTime":"2025-11-25T09:45:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.357660 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.357724 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.357734 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.357748 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.357759 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:35Z","lastTransitionTime":"2025-11-25T09:45:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.460622 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.460667 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.460679 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.460708 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.460720 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:35Z","lastTransitionTime":"2025-11-25T09:45:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.564720 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.564765 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.564778 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.564797 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.564810 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:35Z","lastTransitionTime":"2025-11-25T09:45:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.668486 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.668545 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.668558 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.668578 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.668593 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:35Z","lastTransitionTime":"2025-11-25T09:45:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.771107 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.771182 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.771202 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.771230 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.771251 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:35Z","lastTransitionTime":"2025-11-25T09:45:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.874395 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.874477 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.874502 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.874539 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.874608 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:35Z","lastTransitionTime":"2025-11-25T09:45:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.978028 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.978110 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.978134 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.978168 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:35 crc kubenswrapper[4769]: I1125 09:45:35.978194 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:35Z","lastTransitionTime":"2025-11-25T09:45:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.080422 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.080476 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.080488 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.080504 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.080519 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:36Z","lastTransitionTime":"2025-11-25T09:45:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.183689 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.183761 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.183778 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.183821 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.183839 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:36Z","lastTransitionTime":"2025-11-25T09:45:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.236107 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:45:36 crc kubenswrapper[4769]: E1125 09:45:36.236365 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.286644 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.286682 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.286691 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.286724 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.286736 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:36Z","lastTransitionTime":"2025-11-25T09:45:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.389783 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.389834 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.389848 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.389869 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.389882 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:36Z","lastTransitionTime":"2025-11-25T09:45:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.492882 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.492949 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.492962 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.493009 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.493021 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:36Z","lastTransitionTime":"2025-11-25T09:45:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.596004 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.596048 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.596058 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.596076 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.596088 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:36Z","lastTransitionTime":"2025-11-25T09:45:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.699492 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.699565 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.699586 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.699616 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.699635 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:36Z","lastTransitionTime":"2025-11-25T09:45:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.803154 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.803223 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.803236 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.803267 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.803278 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:36Z","lastTransitionTime":"2025-11-25T09:45:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.907545 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.907617 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.907633 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.907657 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:36 crc kubenswrapper[4769]: I1125 09:45:36.907676 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:36Z","lastTransitionTime":"2025-11-25T09:45:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.011035 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.011097 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.011117 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.011138 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.011149 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:37Z","lastTransitionTime":"2025-11-25T09:45:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.114511 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.114556 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.114569 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.114591 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.114604 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:37Z","lastTransitionTime":"2025-11-25T09:45:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.217624 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.217695 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.217712 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.217740 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.217759 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:37Z","lastTransitionTime":"2025-11-25T09:45:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.236268 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.236379 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.236301 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:45:37 crc kubenswrapper[4769]: E1125 09:45:37.236478 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:45:37 crc kubenswrapper[4769]: E1125 09:45:37.236711 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:45:37 crc kubenswrapper[4769]: E1125 09:45:37.236959 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.320789 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.320866 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.320885 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.320913 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.320933 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:37Z","lastTransitionTime":"2025-11-25T09:45:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.424201 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.424273 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.424289 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.424309 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.424322 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:37Z","lastTransitionTime":"2025-11-25T09:45:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.527093 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.527145 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.527157 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.527173 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.527185 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:37Z","lastTransitionTime":"2025-11-25T09:45:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.636414 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.636481 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.636495 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.636518 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.636532 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:37Z","lastTransitionTime":"2025-11-25T09:45:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.740801 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.740861 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.740875 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.740899 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.740914 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:37Z","lastTransitionTime":"2025-11-25T09:45:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.844586 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.844639 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.844652 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.844673 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.844685 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:37Z","lastTransitionTime":"2025-11-25T09:45:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.948195 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.948247 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.948264 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.948288 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:37 crc kubenswrapper[4769]: I1125 09:45:37.948304 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:37Z","lastTransitionTime":"2025-11-25T09:45:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.052278 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.052336 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.052345 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.052361 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.052371 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:38Z","lastTransitionTime":"2025-11-25T09:45:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.155789 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.155843 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.155855 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.155874 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.155889 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:38Z","lastTransitionTime":"2025-11-25T09:45:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.236512 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:45:38 crc kubenswrapper[4769]: E1125 09:45:38.236763 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.258590 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.258625 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.258641 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.258667 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.258679 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:38Z","lastTransitionTime":"2025-11-25T09:45:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.362269 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.362333 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.362360 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.362394 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.362421 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:38Z","lastTransitionTime":"2025-11-25T09:45:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.466247 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.466295 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.466306 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.466328 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.466341 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:38Z","lastTransitionTime":"2025-11-25T09:45:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.569356 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.569466 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.569485 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.569512 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.569536 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:38Z","lastTransitionTime":"2025-11-25T09:45:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.673341 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.673396 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.673414 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.673439 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.673458 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:38Z","lastTransitionTime":"2025-11-25T09:45:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.776999 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.777048 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.777060 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.777082 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.777094 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:38Z","lastTransitionTime":"2025-11-25T09:45:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.880801 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.880857 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.880878 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.880900 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.880916 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:38Z","lastTransitionTime":"2025-11-25T09:45:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.984617 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.984717 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.984741 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.984782 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:38 crc kubenswrapper[4769]: I1125 09:45:38.984830 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:38Z","lastTransitionTime":"2025-11-25T09:45:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.089227 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.089284 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.089310 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.089338 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.089354 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:39Z","lastTransitionTime":"2025-11-25T09:45:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.193459 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.193510 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.193523 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.193543 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.193555 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:39Z","lastTransitionTime":"2025-11-25T09:45:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.236374 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.236488 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.236374 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:45:39 crc kubenswrapper[4769]: E1125 09:45:39.236575 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:45:39 crc kubenswrapper[4769]: E1125 09:45:39.236656 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 09:45:39 crc kubenswrapper[4769]: E1125 09:45:39.236750 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.296951 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.297013 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.297025 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.297041 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.297052 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:39Z","lastTransitionTime":"2025-11-25T09:45:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.399865 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.399915 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.399930 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.400020 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.400042 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:39Z","lastTransitionTime":"2025-11-25T09:45:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.503944 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.504019 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.504036 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.504059 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.504077 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:39Z","lastTransitionTime":"2025-11-25T09:45:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.607192 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.607234 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.607246 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.607266 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.607277 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:39Z","lastTransitionTime":"2025-11-25T09:45:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.670736 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.670783 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.670794 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.670832 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.670846 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:39Z","lastTransitionTime":"2025-11-25T09:45:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:39 crc kubenswrapper[4769]: E1125 09:45:39.683692 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2e5db0ee-b4dd-41e7-b399-6918209aec97\\\",\\\"systemUUID\\\":\\\"fb8d0e0c-b27f-49f2-81a7-fe75c397959e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:39Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.688345 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.688639 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.688714 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.688795 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.688933 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:39Z","lastTransitionTime":"2025-11-25T09:45:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:39 crc kubenswrapper[4769]: E1125 09:45:39.702170 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2e5db0ee-b4dd-41e7-b399-6918209aec97\\\",\\\"systemUUID\\\":\\\"fb8d0e0c-b27f-49f2-81a7-fe75c397959e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:39Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.706425 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.706468 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.706484 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.706511 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.706530 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:39Z","lastTransitionTime":"2025-11-25T09:45:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:39 crc kubenswrapper[4769]: E1125 09:45:39.718841 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2e5db0ee-b4dd-41e7-b399-6918209aec97\\\",\\\"systemUUID\\\":\\\"fb8d0e0c-b27f-49f2-81a7-fe75c397959e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:39Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.729478 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.729533 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.729547 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.729569 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.729582 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:39Z","lastTransitionTime":"2025-11-25T09:45:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:39 crc kubenswrapper[4769]: E1125 09:45:39.745924 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2e5db0ee-b4dd-41e7-b399-6918209aec97\\\",\\\"systemUUID\\\":\\\"fb8d0e0c-b27f-49f2-81a7-fe75c397959e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:39Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.751250 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.751318 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.751336 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.751354 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.751366 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:39Z","lastTransitionTime":"2025-11-25T09:45:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:39 crc kubenswrapper[4769]: E1125 09:45:39.767732 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2e5db0ee-b4dd-41e7-b399-6918209aec97\\\",\\\"systemUUID\\\":\\\"fb8d0e0c-b27f-49f2-81a7-fe75c397959e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:39Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:39 crc kubenswrapper[4769]: E1125 09:45:39.767904 4769 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.769771 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.769823 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.769846 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.769870 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.769890 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:39Z","lastTransitionTime":"2025-11-25T09:45:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.880765 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.880809 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.880820 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.880835 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.880846 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:39Z","lastTransitionTime":"2025-11-25T09:45:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.984344 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.984411 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.984422 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.984441 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:39 crc kubenswrapper[4769]: I1125 09:45:39.984452 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:39Z","lastTransitionTime":"2025-11-25T09:45:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.086716 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.087087 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.087182 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.087278 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.087376 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:40Z","lastTransitionTime":"2025-11-25T09:45:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.190468 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.190714 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.190818 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.190891 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.190950 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:40Z","lastTransitionTime":"2025-11-25T09:45:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.236299 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:45:40 crc kubenswrapper[4769]: E1125 09:45:40.236558 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.294403 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.294455 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.294466 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.294485 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.294497 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:40Z","lastTransitionTime":"2025-11-25T09:45:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.397552 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.397876 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.397953 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.398074 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.398174 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:40Z","lastTransitionTime":"2025-11-25T09:45:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.501791 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.501857 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.501869 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.501890 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.501903 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:40Z","lastTransitionTime":"2025-11-25T09:45:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.605685 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.605745 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.605758 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.605792 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.605807 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:40Z","lastTransitionTime":"2025-11-25T09:45:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.708910 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.709372 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.709599 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.709800 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.710037 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:40Z","lastTransitionTime":"2025-11-25T09:45:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.813936 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.814037 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.814053 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.814074 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.814091 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:40Z","lastTransitionTime":"2025-11-25T09:45:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.916747 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.917707 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.917855 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.918022 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:40 crc kubenswrapper[4769]: I1125 09:45:40.918200 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:40Z","lastTransitionTime":"2025-11-25T09:45:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.021161 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.021250 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.021265 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.021284 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.021300 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:41Z","lastTransitionTime":"2025-11-25T09:45:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.123839 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.123892 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.123906 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.123926 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.123939 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:41Z","lastTransitionTime":"2025-11-25T09:45:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.227705 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.227815 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.227841 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.227871 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.227895 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:41Z","lastTransitionTime":"2025-11-25T09:45:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.236531 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.236572 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.236601 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:45:41 crc kubenswrapper[4769]: E1125 09:45:41.237060 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:45:41 crc kubenswrapper[4769]: E1125 09:45:41.237291 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:45:41 crc kubenswrapper[4769]: E1125 09:45:41.237484 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.331117 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.331559 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.331695 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.331835 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.332007 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:41Z","lastTransitionTime":"2025-11-25T09:45:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.435281 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.435728 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.435822 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.435896 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.435999 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:41Z","lastTransitionTime":"2025-11-25T09:45:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.539108 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.539186 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.539206 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.539233 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.539251 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:41Z","lastTransitionTime":"2025-11-25T09:45:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.641789 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.642109 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.642211 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.642279 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.642343 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:41Z","lastTransitionTime":"2025-11-25T09:45:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.745515 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.745588 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.745611 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.745650 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.745676 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:41Z","lastTransitionTime":"2025-11-25T09:45:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.848100 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.848162 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.848174 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.848193 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.848212 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:41Z","lastTransitionTime":"2025-11-25T09:45:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.951728 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.951801 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.951825 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.951872 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:41 crc kubenswrapper[4769]: I1125 09:45:41.951894 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:41Z","lastTransitionTime":"2025-11-25T09:45:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.055175 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.055242 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.055253 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.055272 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.055288 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:42Z","lastTransitionTime":"2025-11-25T09:45:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.164275 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.164702 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.164774 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.164844 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.164902 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:42Z","lastTransitionTime":"2025-11-25T09:45:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.236875 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:45:42 crc kubenswrapper[4769]: E1125 09:45:42.237233 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.257769 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.268516 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.268567 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.268582 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.268604 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.268620 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:42Z","lastTransitionTime":"2025-11-25T09:45:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.281583 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.296700 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-v24zk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a18d0f06-2fff-4e1a-9b11-01eaea85baa1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://181a23a91023308fd2b334e8113f5e1da7b17d63f0031f4219ebd74213e936ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8gdv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-v24zk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.316611 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-7khh9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"76770e00-0d61-45ae-9772-1e8c42dc6ea6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6phx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6phx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:52Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-7khh9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.335462 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"84194500-bce6-4d10-838c-a95f04acf31b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f2bc66817eb90eafa6eae43756cc4389973a5d3939e464e42a4348515e430d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ac282b65751506b0de6685f1e74ad6468022a459e40c124923fc1116c4b6c72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ac282b65751506b0de6685f1e74ad6468022a459e40c124923fc1116c4b6c72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.354429 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89fbb83e7cad0ae1f970669d2cdb9bf9535d5b8e5ccec1962faa377fe263cc17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddfc773720803f36e40054cc984d270f6ff699d660c66491adb65338a52e00e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.371150 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2bfc42c354557e335b5586b8aae6051c137143fdd2419a04802375587244df0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.371987 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.372041 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.372058 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.372084 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.372103 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:42Z","lastTransitionTime":"2025-11-25T09:45:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.383791 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kzpxc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb3a206d-ee72-415c-bc23-2e1b2d6f8592\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f88b14a3ffb863171ab8bbb4eb89877dfd711fadb03f45a6a337540d34c0a7d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv2k5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:37Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kzpxc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.401385 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27dbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46575153-4800-4ed2-8aa3-b66b98a9c899\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ed3aea46c79a1ec2872cceee652c70923be3656e45414fecb60f293639c81c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27dbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.427855 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"069c06c6-fe60-41d0-b96d-86606f55b258\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82259eae43bbd7b738e4f7fef449b797efc4d557417eb01b61a9ff1e6d59d07b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41ce53b7a0ecd75b9205774e514ba01c6852f939cff8e96fa49105f4e600702\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0893245e50828fc44a5a9f575584f675dfd48a67e31fb78734b575e30af769ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79aa5dc74b62d0d36d744a8603484ce1a5199bba790a406ffa70f76b70592e51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7459078ef54cd1bb3171908596cdc66735a96463eafbc5a0ca21e644cd934e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab25f2ae721ae8616e5ef712256055c8f8a353d898a7f2711938634d2d9fbbe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5d5017da58d670189a8129172ef3a6d333e47c6ef6cf78ca9f1392dec54530f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5d5017da58d670189a8129172ef3a6d333e47c6ef6cf78ca9f1392dec54530f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:45:33Z\\\",\\\"message\\\":\\\"nts]} name:Service_openshift-apiserver/check-endpoints_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.139:17698:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {8efa4d1a-72f5-4dfa-9bc2-9d93ef11ecf2}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1125 09:45:33.166612 6820 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:33Z is after 2025-08-24T17:21:41Z]\\\\nI1125 09:45:33.166629 6820 obj_retry.go:303] Retry objec\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:45:32Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-kfvzs_openshift-ovn-kubernetes(069c06c6-fe60-41d0-b96d-86606f55b258)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://010a6e6fa299b12043210d61b4ac16d7d1f4d948888065c8a9e723060e2be2b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-kfvzs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.442452 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbf363f-faf3-45da-a19f-54cc0119825c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24\\\",\\\"i
mage\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:44:25Z\\\",\\\"message\\\":\\\"W1125 09:44:15.399269 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:44:15.400911 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063855 cert, and key in /tmp/serving-cert-2102574939/serving-signer.crt, /tmp/serving-cert-2102574939/serving-signer.key\\\\nI1125 09:44:15.634430 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:44:15.637907 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:44:15.638219 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:44:15.639555 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2102574939/tls.crt::/tmp/serving-cert-2102574939/tls.key\\\\\\\"\\\\nF1125 09:44:25.945399 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.455868 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3cb8bb1-44ab-4ea0-8360-fd206e03b0bd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea75adfbd1af3891c8c83253f0cc56e3b7f0eef396bd625d683d955d9732eba6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1defc8279ef3e3bcb7b94e0ebecd9adfa114c27b4aba6b07fc9cce9933ac3eed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10cb7e7ffd92e069437d5588d6e1f2bcb3d776fe24abf845fa3554e8ef8ebacb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://676ced28573a95ae4d739e490ebdfa690170c0943a22cb1cbafdc2aeb84df172\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://676ced28573a95ae4d739e490ebdfa690170c0943a22cb1cbafdc2aeb84df172\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.474276 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-s47tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"025219f0-bc69-4a33-acaa-b055607272bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9552f354a6fdea64276d57f0df1ea99659a4cb438db1b42a3d9c9a97108c0c87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8283067913fddd4b3d58c8dc0cc7000d482d929bc6a1d4fb501bfa8c0fa9525\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:45:26Z\\\",\\\"message\\\":\\\"2025-11-25T09:44:40+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_524ab556-dd3a-4852-83f8-44456ea8689b\\\\n2025-11-25T09:44:40+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_524ab556-dd3a-4852-83f8-44456ea8689b to /host/opt/cni/bin/\\\\n2025-11-25T09:44:41Z [verbose] multus-daemon started\\\\n2025-11-25T09:44:41Z [verbose] Readiness Indicator file check\\\\n2025-11-25T09:45:26Z [error] have you checked that your default network is ready? 
still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:45:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7rjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-s47tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.477067 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.477151 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.477198 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.477230 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.477246 4769 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:42Z","lastTransitionTime":"2025-11-25T09:45:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.491477 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c58a9381-bd89-4fd9-8217-f04646879968\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cd58c1df5cc30ce98fc70b6117deafa5ebeb2d1a7d7b481b62c38ab39c9290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8b504d3d81cc7b0e4741363b8a9e4c0ea3a0c1af959c8b3def222b44a646b22b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fad541e8dbab3802d7acafd3a70a54f992d4578319dc441c41ffdffdd635d63\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.506850 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d58c71b5-5dc4-45c1-9b58-9740a35d2256\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdf114b30bdda719b3e55767f9df9f9e8e3e3e9238fa8403a388d7892922563e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a47750ed21c017ef2cf17c7e8ce0676b8bd6a9fe2669c3d5adae247e557ea71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-98mzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.520874 4769 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvkdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37fe6f3a-f80b-4d79-9825-8f1c67c64d5c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d18ad273888cf05de2af3469630aaf22e7e5313ab57628e0a68add8d24dfeabd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j6llt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a1b88c3a27560378821e084a6e12a5cdb549acdd447f54ea696a79f8914baad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j6llt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wvkdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.533429 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e477c71aa0332cf903853803df3a7cfda7c251a749b3f3d9226e0b6edbd34a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.546746 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:42Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.579624 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.579676 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.579685 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.579699 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.579709 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:42Z","lastTransitionTime":"2025-11-25T09:45:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.683477 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.683532 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.683553 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.683575 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.683589 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:42Z","lastTransitionTime":"2025-11-25T09:45:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.787384 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.787441 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.787459 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.787482 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.787499 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:42Z","lastTransitionTime":"2025-11-25T09:45:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.890595 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.890663 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.890680 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.890706 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.890733 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:42Z","lastTransitionTime":"2025-11-25T09:45:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.994020 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.994086 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.994106 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.994133 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:42 crc kubenswrapper[4769]: I1125 09:45:42.994154 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:42Z","lastTransitionTime":"2025-11-25T09:45:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.098229 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.098278 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.098291 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.098313 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.098326 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:43Z","lastTransitionTime":"2025-11-25T09:45:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.201251 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.201332 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.201351 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.201382 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.201402 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:43Z","lastTransitionTime":"2025-11-25T09:45:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.236026 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.236087 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.236139 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:45:43 crc kubenswrapper[4769]: E1125 09:45:43.236242 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:45:43 crc kubenswrapper[4769]: E1125 09:45:43.236467 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:45:43 crc kubenswrapper[4769]: E1125 09:45:43.236556 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.305461 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.305527 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.305539 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.305559 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.305571 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:43Z","lastTransitionTime":"2025-11-25T09:45:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.409414 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.409479 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.409490 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.409509 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.409530 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:43Z","lastTransitionTime":"2025-11-25T09:45:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.513107 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.513154 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.513171 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.513191 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.513205 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:43Z","lastTransitionTime":"2025-11-25T09:45:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.616132 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.616196 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.616214 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.616237 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.616253 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:43Z","lastTransitionTime":"2025-11-25T09:45:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.719229 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.719303 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.719322 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.719346 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.719362 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:43Z","lastTransitionTime":"2025-11-25T09:45:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.822645 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.822778 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.822843 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.822873 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.822931 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:43Z","lastTransitionTime":"2025-11-25T09:45:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.926266 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.926347 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.926370 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.926396 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:43 crc kubenswrapper[4769]: I1125 09:45:43.926417 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:43Z","lastTransitionTime":"2025-11-25T09:45:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.030267 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.030348 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.030369 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.030401 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.030422 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:44Z","lastTransitionTime":"2025-11-25T09:45:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.134586 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.134666 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.134724 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.134757 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.134780 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:44Z","lastTransitionTime":"2025-11-25T09:45:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.236409 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:45:44 crc kubenswrapper[4769]: E1125 09:45:44.236639 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.238667 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.238738 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.238750 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.238773 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.238790 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:44Z","lastTransitionTime":"2025-11-25T09:45:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.341469 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.341521 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.341533 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.341549 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.341560 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:44Z","lastTransitionTime":"2025-11-25T09:45:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.444533 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.444609 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.444625 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.444650 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.444666 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:44Z","lastTransitionTime":"2025-11-25T09:45:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.548023 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.548108 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.548131 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.548162 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.548174 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:44Z","lastTransitionTime":"2025-11-25T09:45:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.651744 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.651815 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.651840 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.651870 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.651892 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:44Z","lastTransitionTime":"2025-11-25T09:45:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.754911 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.754994 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.755007 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.755029 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.755044 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:44Z","lastTransitionTime":"2025-11-25T09:45:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.857501 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.857546 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.857554 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.857568 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.857578 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:44Z","lastTransitionTime":"2025-11-25T09:45:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.960129 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.960184 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.960195 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.960214 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:44 crc kubenswrapper[4769]: I1125 09:45:44.960225 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:44Z","lastTransitionTime":"2025-11-25T09:45:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:45 crc kubenswrapper[4769]: I1125 09:45:45.062868 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:45 crc kubenswrapper[4769]: I1125 09:45:45.062920 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:45 crc kubenswrapper[4769]: I1125 09:45:45.062930 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:45 crc kubenswrapper[4769]: I1125 09:45:45.063005 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:45 crc kubenswrapper[4769]: I1125 09:45:45.063036 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:45Z","lastTransitionTime":"2025-11-25T09:45:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:45 crc kubenswrapper[4769]: I1125 09:45:45.165736 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:45 crc kubenswrapper[4769]: I1125 09:45:45.165789 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:45 crc kubenswrapper[4769]: I1125 09:45:45.165804 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:45 crc kubenswrapper[4769]: I1125 09:45:45.165823 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:45 crc kubenswrapper[4769]: I1125 09:45:45.165835 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:45Z","lastTransitionTime":"2025-11-25T09:45:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:45 crc kubenswrapper[4769]: I1125 09:45:45.236033 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:45:45 crc kubenswrapper[4769]: I1125 09:45:45.236065 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:45:45 crc kubenswrapper[4769]: I1125 09:45:45.236039 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:45:45 crc kubenswrapper[4769]: E1125 09:45:45.236243 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:45:45 crc kubenswrapper[4769]: E1125 09:45:45.236426 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:45:45 crc kubenswrapper[4769]: E1125 09:45:45.236741 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:45:45 crc kubenswrapper[4769]: I1125 09:45:45.269001 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:45 crc kubenswrapper[4769]: I1125 09:45:45.269072 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:45 crc kubenswrapper[4769]: I1125 09:45:45.269097 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:45 crc kubenswrapper[4769]: I1125 09:45:45.269130 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:45 crc kubenswrapper[4769]: I1125 09:45:45.269154 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:45Z","lastTransitionTime":"2025-11-25T09:45:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:45 crc kubenswrapper[4769]: I1125 09:45:45.371547 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:45 crc kubenswrapper[4769]: I1125 09:45:45.371601 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:45 crc kubenswrapper[4769]: I1125 09:45:45.371612 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:45 crc kubenswrapper[4769]: I1125 09:45:45.371629 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:45 crc kubenswrapper[4769]: I1125 09:45:45.371642 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:45Z","lastTransitionTime":"2025-11-25T09:45:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 25 09:45:46 crc kubenswrapper[4769]: I1125 09:45:46.236228 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9"
Nov 25 09:45:46 crc kubenswrapper[4769]: E1125 09:45:46.236480 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6"
pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:45:46 crc kubenswrapper[4769]: I1125 09:45:46.309519 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:46 crc kubenswrapper[4769]: I1125 09:45:46.309862 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:46 crc kubenswrapper[4769]: I1125 09:45:46.310063 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:46 crc kubenswrapper[4769]: I1125 09:45:46.310206 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:46 crc kubenswrapper[4769]: I1125 09:45:46.310273 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:46Z","lastTransitionTime":"2025-11-25T09:45:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:46 crc kubenswrapper[4769]: I1125 09:45:46.413041 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:46 crc kubenswrapper[4769]: I1125 09:45:46.413082 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:46 crc kubenswrapper[4769]: I1125 09:45:46.413090 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:46 crc kubenswrapper[4769]: I1125 09:45:46.413107 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:46 crc kubenswrapper[4769]: I1125 09:45:46.413118 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:46Z","lastTransitionTime":"2025-11-25T09:45:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 25 09:45:47 crc kubenswrapper[4769]: I1125 09:45:47.236455 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:45:47 crc kubenswrapper[4769]: E1125 09:45:47.236897 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:45:47 crc kubenswrapper[4769]: I1125 09:45:47.236541 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:45:47 crc kubenswrapper[4769]: I1125 09:45:47.236515 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:45:47 crc kubenswrapper[4769]: E1125 09:45:47.237007 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:45:47 crc kubenswrapper[4769]: E1125 09:45:47.237064 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:45:47 crc kubenswrapper[4769]: I1125 09:45:47.242118 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:47 crc kubenswrapper[4769]: I1125 09:45:47.242173 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:47 crc kubenswrapper[4769]: I1125 09:45:47.242186 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:47 crc kubenswrapper[4769]: I1125 09:45:47.242204 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:47 crc kubenswrapper[4769]: I1125 09:45:47.242215 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:47Z","lastTransitionTime":"2025-11-25T09:45:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:47 crc kubenswrapper[4769]: I1125 09:45:47.345304 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:47 crc kubenswrapper[4769]: I1125 09:45:47.345359 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:47 crc kubenswrapper[4769]: I1125 09:45:47.345377 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:47 crc kubenswrapper[4769]: I1125 09:45:47.345437 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:47 crc kubenswrapper[4769]: I1125 09:45:47.345458 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:47Z","lastTransitionTime":"2025-11-25T09:45:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 25 09:45:48 crc kubenswrapper[4769]: I1125 09:45:48.236388 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9"
Nov 25 09:45:48 crc kubenswrapper[4769]: E1125 09:45:48.236659 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6"
pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:45:48 crc kubenswrapper[4769]: I1125 09:45:48.237326 4769 scope.go:117] "RemoveContainer" containerID="c5d5017da58d670189a8129172ef3a6d333e47c6ef6cf78ca9f1392dec54530f" Nov 25 09:45:48 crc kubenswrapper[4769]: E1125 09:45:48.237664 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-kfvzs_openshift-ovn-kubernetes(069c06c6-fe60-41d0-b96d-86606f55b258)\"" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" Nov 25 09:45:48 crc kubenswrapper[4769]: I1125 09:45:48.274065 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:48 crc kubenswrapper[4769]: I1125 09:45:48.274123 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:48 crc kubenswrapper[4769]: I1125 09:45:48.274191 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:48 crc kubenswrapper[4769]: I1125 09:45:48.274216 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:48 crc kubenswrapper[4769]: I1125 09:45:48.274230 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:48Z","lastTransitionTime":"2025-11-25T09:45:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:48 crc kubenswrapper[4769]: I1125 09:45:48.377365 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:48 crc kubenswrapper[4769]: I1125 09:45:48.377429 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:48 crc kubenswrapper[4769]: I1125 09:45:48.377442 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:48 crc kubenswrapper[4769]: I1125 09:45:48.377464 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:48 crc kubenswrapper[4769]: I1125 09:45:48.377478 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:48Z","lastTransitionTime":"2025-11-25T09:45:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 25 09:45:49 crc kubenswrapper[4769]: I1125 09:45:49.236484 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:45:49 crc kubenswrapper[4769]: I1125 09:45:49.236511 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:45:49 crc kubenswrapper[4769]: I1125 09:45:49.236621 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:45:49 crc kubenswrapper[4769]: E1125 09:45:49.236729 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:45:49 crc kubenswrapper[4769]: E1125 09:45:49.237788 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:45:49 crc kubenswrapper[4769]: E1125 09:45:49.237889 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:45:49 crc kubenswrapper[4769]: I1125 09:45:49.307032 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:49 crc kubenswrapper[4769]: I1125 09:45:49.307081 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:49 crc kubenswrapper[4769]: I1125 09:45:49.307093 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:49 crc kubenswrapper[4769]: I1125 09:45:49.307109 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:49 crc kubenswrapper[4769]: I1125 09:45:49.307122 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:49Z","lastTransitionTime":"2025-11-25T09:45:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:49 crc kubenswrapper[4769]: I1125 09:45:49.409888 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:49 crc kubenswrapper[4769]: I1125 09:45:49.409952 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:49 crc kubenswrapper[4769]: I1125 09:45:49.409985 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:49 crc kubenswrapper[4769]: I1125 09:45:49.410004 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:49 crc kubenswrapper[4769]: I1125 09:45:49.410018 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:49Z","lastTransitionTime":"2025-11-25T09:45:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 25 09:45:49 crc kubenswrapper[4769]: E1125 09:45:49.939132 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2e5db0ee-b4dd-41e7-b399-6918209aec97\\\",\\\"systemUUID\\\":\\\"fb8d0e0c-b27f-49f2-81a7-fe75c397959e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:49Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:49 crc kubenswrapper[4769]: I1125 09:45:49.945140 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:49 crc kubenswrapper[4769]: I1125 09:45:49.945205 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:45:49 crc kubenswrapper[4769]: I1125 09:45:49.945224 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:49 crc kubenswrapper[4769]: I1125 09:45:49.945245 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:49 crc kubenswrapper[4769]: I1125 09:45:49.945262 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:49Z","lastTransitionTime":"2025-11-25T09:45:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:49 crc kubenswrapper[4769]: E1125 09:45:49.962143 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2e5db0ee-b4dd-41e7-b399-6918209aec97\\\",\\\"systemUUID\\\":\\\"fb8d0e0c-b27f-49f2-81a7-fe75c397959e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:49Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:49 crc kubenswrapper[4769]: I1125 09:45:49.966412 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:49 crc kubenswrapper[4769]: I1125 09:45:49.966466 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:45:49 crc kubenswrapper[4769]: I1125 09:45:49.966487 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:49 crc kubenswrapper[4769]: I1125 09:45:49.966513 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:49 crc kubenswrapper[4769]: I1125 09:45:49.966531 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:49Z","lastTransitionTime":"2025-11-25T09:45:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:49 crc kubenswrapper[4769]: E1125 09:45:49.992405 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2e5db0ee-b4dd-41e7-b399-6918209aec97\\\",\\\"systemUUID\\\":\\\"fb8d0e0c-b27f-49f2-81a7-fe75c397959e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:49Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:49 crc kubenswrapper[4769]: I1125 09:45:49.997808 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:49 crc kubenswrapper[4769]: I1125 09:45:49.997848 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:45:49 crc kubenswrapper[4769]: I1125 09:45:49.997864 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:49 crc kubenswrapper[4769]: I1125 09:45:49.997910 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:49 crc kubenswrapper[4769]: I1125 09:45:49.997929 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:49Z","lastTransitionTime":"2025-11-25T09:45:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:50 crc kubenswrapper[4769]: E1125 09:45:50.013827 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2e5db0ee-b4dd-41e7-b399-6918209aec97\\\",\\\"systemUUID\\\":\\\"fb8d0e0c-b27f-49f2-81a7-fe75c397959e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.018207 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.018250 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.018259 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.018275 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.018285 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:50Z","lastTransitionTime":"2025-11-25T09:45:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:50 crc kubenswrapper[4769]: E1125 09:45:50.032357 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T09:45:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2e5db0ee-b4dd-41e7-b399-6918209aec97\\\",\\\"systemUUID\\\":\\\"fb8d0e0c-b27f-49f2-81a7-fe75c397959e\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:50Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:50 crc kubenswrapper[4769]: E1125 09:45:50.032586 4769 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.034601 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.034643 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.034658 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.034680 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.034697 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:50Z","lastTransitionTime":"2025-11-25T09:45:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.136860 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.136912 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.136929 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.136947 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.136974 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:50Z","lastTransitionTime":"2025-11-25T09:45:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.236559 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:45:50 crc kubenswrapper[4769]: E1125 09:45:50.236759 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.241661 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.241719 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.241733 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.241749 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.241761 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:50Z","lastTransitionTime":"2025-11-25T09:45:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.254485 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.345921 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.345999 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.346015 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.346034 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.346054 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:50Z","lastTransitionTime":"2025-11-25T09:45:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.449306 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.449415 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.449435 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.449487 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.449504 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:50Z","lastTransitionTime":"2025-11-25T09:45:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.552271 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.552343 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.552361 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.552388 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.552406 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:50Z","lastTransitionTime":"2025-11-25T09:45:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.654773 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.654829 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.654841 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.654858 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.654874 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:50Z","lastTransitionTime":"2025-11-25T09:45:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.758697 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.758757 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.758770 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.758793 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.758807 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:50Z","lastTransitionTime":"2025-11-25T09:45:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.862841 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.863147 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.863180 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.863207 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.863220 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:50Z","lastTransitionTime":"2025-11-25T09:45:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.966776 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.966834 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.966847 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.966870 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:50 crc kubenswrapper[4769]: I1125 09:45:50.966883 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:50Z","lastTransitionTime":"2025-11-25T09:45:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.070222 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.070289 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.070313 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.070385 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.070409 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:51Z","lastTransitionTime":"2025-11-25T09:45:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.173401 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.173458 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.173475 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.173499 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.173515 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:51Z","lastTransitionTime":"2025-11-25T09:45:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.236235 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:45:51 crc kubenswrapper[4769]: E1125 09:45:51.236388 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.236252 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.236227 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:45:51 crc kubenswrapper[4769]: E1125 09:45:51.236471 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:45:51 crc kubenswrapper[4769]: E1125 09:45:51.236599 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.277711 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.277759 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.277770 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.277802 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.277820 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:51Z","lastTransitionTime":"2025-11-25T09:45:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.381139 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.381185 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.381195 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.381211 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.381224 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:51Z","lastTransitionTime":"2025-11-25T09:45:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.484490 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.484551 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.484569 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.484594 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.484614 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:51Z","lastTransitionTime":"2025-11-25T09:45:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.589401 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.589718 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.589734 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.589756 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.589776 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:51Z","lastTransitionTime":"2025-11-25T09:45:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.692842 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.692919 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.692939 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.693002 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.693029 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:51Z","lastTransitionTime":"2025-11-25T09:45:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.796944 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.797063 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.797088 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.797121 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.797145 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:51Z","lastTransitionTime":"2025-11-25T09:45:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.900460 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.900509 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.900524 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.900547 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:51 crc kubenswrapper[4769]: I1125 09:45:51.900565 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:51Z","lastTransitionTime":"2025-11-25T09:45:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.004421 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.004485 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.004499 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.004520 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.004538 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:52Z","lastTransitionTime":"2025-11-25T09:45:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.107780 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.107831 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.107843 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.107860 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.107874 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:52Z","lastTransitionTime":"2025-11-25T09:45:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.211384 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.211456 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.211471 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.211492 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.211507 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:52Z","lastTransitionTime":"2025-11-25T09:45:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.236855 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:45:52 crc kubenswrapper[4769]: E1125 09:45:52.238168 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.254620 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.271876 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.285009 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-v24zk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a18d0f06-2fff-4e1a-9b11-01eaea85baa1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://181a23a91023308fd2b334e8113f5e1da7b17d63f0031f4219ebd74213e936ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8gdv8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126
.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:41Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-v24zk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.302717 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-7khh9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"76770e00-0d61-45ae-9772-1e8c42dc6ea6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6phx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6phx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:52Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-7khh9\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.314698 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"84194500-bce6-4d10-838c-a95f04acf31b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f2bc66817eb90eafa6eae43756cc4389973a5d3939e464e42a4348515e430d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ac282b65751506b0de6685f1e74ad6468022a459e40c124923fc1116c4b6c72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ac282b65751506b0de6685f1e74ad6468022a459e40c124923fc1116c4b6c72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.315138 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.315193 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.315213 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.315239 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.315270 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:52Z","lastTransitionTime":"2025-11-25T09:45:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.334118 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"491d3b90-8ec4-48dd-bc56-200196b68930\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://62b58345b1b2972c209332a824d83239d52dc4080dc56bc3910201ec6d80ce19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://937cee08a2e3eaae95000066a24d21f537fe665a2ecde2f31632a6b33bd51572\\\",\\\"image\\\":\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba2c29dce4dc4b1f79f8af2caa9e415b46e19b02e0ba3412ac21f28a33309251\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://427f56858566336ea961bc9f474263907944de8c064e2222c2bfef173a85bd12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c52f6b6cbd891ac6ba040fc2e27c643e8e48795f82b943dd8b2d1f68744e4bd3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7def6cd379c7f5d75dc1bf2c67a6a758ac8565e366f0f907903566232b7746a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33
e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7def6cd379c7f5d75dc1bf2c67a6a758ac8565e366f0f907903566232b7746a5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://72a577eb9b7ce279cb1d93202a80689e2e517349b6d3d88d83b71f8304bd7dd5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://72a577eb9b7ce279cb1d93202a80689e2e517349b6d3d88d83b71f8304bd7dd5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://e2c14f8876af9710666f03b36306acae7a4af61c305d2e1aac2ce6b414d37fe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e2c14f8876af9710666f03b36306acae7a4af61c305d2e1aac2ce6b414d37fe9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.347880 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://89fbb83e7cad0ae1f970669d2cdb9bf9535d5b8e5ccec1962faa377fe263cc17\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddfc773720803f36e40054cc984d270f6ff699d660c66491adb65338a52e00e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.362422 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:35Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f2bfc42c354557e335b5586b8aae6051c137143fdd2419a04802375587244df0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.374125 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-kzpxc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb3a206d-ee72-415c-bc23-2e1b2d6f8592\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f88b14a3ffb863171ab8bbb4eb89877dfd711fadb03f45a6a337540d34c0a7d1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv2k5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:37Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-kzpxc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.387539 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-27dbp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46575153-4800-4ed2-8aa3-b66b98a9c899\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3ed3aea46c79a1ec2872cceee652c70923be3656e45414fecb60f293639c81c1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ac575874dce5e1ee7d9a810c0181a6613b5ec8ac603125a8efa0647cd90609f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42278a63f8af224a3110645a95c8251d16882e0c9678b24a9fa6dd2dea7a71f6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb188343498140905d92b83cea623c290124ad18d1ac248b27964a65bd2d8694\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://38ae24724734150c2c1ae2e0611edded6616e6d59ab0af03724c7990bed84192\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0f3d8f93083fc6614a694f25691d57128ad8f4d5e9095cb1915ddeacfcf2b296\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://937a427abdca6e70fd9b5aa5a475dfd164343edcf857dcb4a26943f195e9900d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brknc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-27dbp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.416900 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"069c06c6-fe60-41d0-b96d-86606f55b258\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://82259eae43bbd7b738e4f7fef449b797efc4d557417eb01b61a9ff1e6d59d07b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a41ce53b7a0ecd75b9205774e514ba01c6852f939cff8e96fa49105f4e600702\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0893245e50828fc44a5a9f575584f675dfd48a67e31fb78734b575e30af769ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://79aa5dc74b62d0d36d744a8603484ce1a5199bba790a406ffa70f76b70592e51\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7459078ef54cd1bb3171908596cdc66735a96463eafbc5a0ca21e644cd934e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab25f2ae721ae8616e5ef712256055c8f8a353d898a7f2711938634d2d9fbbe9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5d5017da58d670189a8129172ef3a6d333e47c6ef6cf78ca9f1392dec54530f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5d5017da58d670189a8129172ef3a6d333e47c6ef6cf78ca9f1392dec54530f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:45:33Z\\\",\\\"message\\\":\\\"nts]} name:Service_openshift-apiserver/check-endpoints_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.139:17698:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {8efa4d1a-72f5-4dfa-9bc2-9d93ef11ecf2}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1125 09:45:33.166612 6820 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:33Z is after 2025-08-24T17:21:41Z]\\\\nI1125 09:45:33.166629 6820 obj_retry.go:303] Retry objec\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:45:32Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-kfvzs_openshift-ovn-kubernetes(069c06c6-fe60-41d0-b96d-86606f55b258)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://010a6e6fa299b12043210d61b4ac16d7d1f4d948888065c8a9e723060e2be2b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9wsqq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-kfvzs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.418244 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.418283 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.418297 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.418317 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.418330 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:52Z","lastTransitionTime":"2025-11-25T09:45:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.436188 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bbf363f-faf3-45da-a19f-54cc0119825c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T09:44:25Z\\\",\\\"message\\\":\\\"W1125 09:44:15.399269 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 09:44:15.400911 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764063855 cert, and key in /tmp/serving-cert-2102574939/serving-signer.crt, /tmp/serving-cert-2102574939/serving-signer.key\\\\nI1125 09:44:15.634430 1 observer_polling.go:159] Starting file observer\\\\nW1125 09:44:15.637907 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 09:44:15.638219 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 09:44:15.639555 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2102574939/tls.crt::/tmp/serving-cert-2102574939/tls.key\\\\\\\"\\\\nF1125 09:44:25.945399 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.451478 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3cb8bb1-44ab-4ea0-8360-fd206e03b0bd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ea75adfbd1af3891c8c83253f0cc56e3b7f0eef396bd625d683d955d9732eba6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1defc8279ef3e3bcb7b94e0ebecd9adfa114c27b4aba6b07fc9cce9933ac3eed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://10cb7e7ffd92e069437d5588d6e1f2bcb3d776fe24abf845fa3554e8ef8ebacb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://676ced28573a95ae4d739e490ebdfa690170c0943a22cb1cbafdc2aeb84df172\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://676ced28573a95ae4d739e490ebdfa690170c0943a22cb1cbafdc2aeb84df172\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T09:44:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.467800 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-s47tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"025219f0-bc69-4a33-acaa-b055607272bb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:45:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9552f354a6fdea64276d57f0df1ea99659a4cb438db1b42a3d9c9a97108c0c87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8283067913fddd4b3d58c8dc0cc7000d482d929bc6a1d4fb501bfa8c0fa9525\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T09:45:26Z\\\",\\\"message\\\":\\\"2025-11-25T09:44:40+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_524ab556-dd3a-4852-83f8-44456ea8689b\\\\n2025-11-25T09:44:40+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_524ab556-dd3a-4852-83f8-44456ea8689b to /host/opt/cni/bin/\\\\n2025-11-25T09:44:41Z [verbose] multus-daemon started\\\\n2025-11-25T09:44:41Z [verbose] Readiness Indicator file check\\\\n2025-11-25T09:45:26Z [error] have you checked that your default network is ready? 
still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:45:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-n7rjx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-multus\"/\"multus-s47tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.482028 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c58a9381-bd89-4fd9-8217-f04646879968\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://58cd58c1df5cc30ce98fc70b6117deafa5ebeb2d1a7d7b481b62c38ab39c9290\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8b504d3d81cc7b0e4741363b8a9e4c0ea3a0c1af959c8b3def222b44a646b22b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fad541e8dbab3802d7acafd3a70a54f992d4578319dc441c41ffdffdd635d63\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.495605 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d58c71b5-5dc4-45c1-9b58-9740a35d2256\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdf114b30bdda719b3e55767f9df9f9e8e3e3e9238fa8403a388d7892922563e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a47750ed21c017ef2
cf17c7e8ce0676b8bd6a9fe2669c3d5adae247e557ea71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vmtw6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:38Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-98mzt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.508385 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvkdt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37fe6f3a-f80b-4d79-9825-8f1c67c64d5c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d18ad273888cf05de2af3469630aaf22e7e5313ab57628e0a68add8d24dfeabd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j6llt\\\
",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a1b88c3a27560378821e084a6e12a5cdb549acdd447f54ea696a79f8914baad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j6llt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T09:44:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wvkdt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.521613 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.521687 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.521697 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.521715 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.521727 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:52Z","lastTransitionTime":"2025-11-25T09:45:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.524005 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e477c71aa0332cf903853803df3a7cfda7c251a749b3f3d9226e0b6edbd34a57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T09:44:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.537650 4769 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T09:44:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T09:45:52Z is after 2025-08-24T17:21:41Z" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.624579 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.624627 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.624636 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.624653 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.624665 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:52Z","lastTransitionTime":"2025-11-25T09:45:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.727601 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.727637 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.727647 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.727661 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:52 crc kubenswrapper[4769]: I1125 09:45:52.727671 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:52Z","lastTransitionTime":"2025-11-25T09:45:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:53 crc kubenswrapper[4769]: I1125 09:45:53.141333 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:53 crc kubenswrapper[4769]: I1125 09:45:53.141409 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:53 crc kubenswrapper[4769]: I1125 09:45:53.141424 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:53 crc kubenswrapper[4769]: I1125 09:45:53.141449 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:53 crc kubenswrapper[4769]: I1125 09:45:53.141463 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:53Z","lastTransitionTime":"2025-11-25T09:45:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:53 crc kubenswrapper[4769]: I1125 09:45:53.236674 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:45:53 crc kubenswrapper[4769]: I1125 09:45:53.236698 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:45:53 crc kubenswrapper[4769]: E1125 09:45:53.236872 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:45:53 crc kubenswrapper[4769]: E1125 09:45:53.237045 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:45:53 crc kubenswrapper[4769]: I1125 09:45:53.237213 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:45:53 crc kubenswrapper[4769]: E1125 09:45:53.237821 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 09:45:53 crc kubenswrapper[4769]: I1125 09:45:53.244035 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:53 crc kubenswrapper[4769]: I1125 09:45:53.244078 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:53 crc kubenswrapper[4769]: I1125 09:45:53.244089 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:53 crc kubenswrapper[4769]: I1125 09:45:53.244105 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:53 crc kubenswrapper[4769]: I1125 09:45:53.244116 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:53Z","lastTransitionTime":"2025-11-25T09:45:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:54 crc kubenswrapper[4769]: I1125 09:45:54.174130 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:54 crc kubenswrapper[4769]: I1125 09:45:54.174203 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:54 crc kubenswrapper[4769]: I1125 09:45:54.174226 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:54 crc kubenswrapper[4769]: I1125 09:45:54.174257 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:54 crc kubenswrapper[4769]: I1125 09:45:54.174277 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:54Z","lastTransitionTime":"2025-11-25T09:45:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:54 crc kubenswrapper[4769]: I1125 09:45:54.236004 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9"
Nov 25 09:45:54 crc kubenswrapper[4769]: E1125 09:45:54.236209 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6"
Nov 25 09:45:54 crc kubenswrapper[4769]: I1125 09:45:54.277559 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:54 crc kubenswrapper[4769]: I1125 09:45:54.277619 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:54 crc kubenswrapper[4769]: I1125 09:45:54.277638 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:54 crc kubenswrapper[4769]: I1125 09:45:54.277661 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:54 crc kubenswrapper[4769]: I1125 09:45:54.277680 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:54Z","lastTransitionTime":"2025-11-25T09:45:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:55 crc kubenswrapper[4769]: I1125 09:45:55.212592 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:55 crc kubenswrapper[4769]: I1125 09:45:55.212626 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:55 crc kubenswrapper[4769]: I1125 09:45:55.212635 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:55 crc kubenswrapper[4769]: I1125 09:45:55.212653 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:55 crc kubenswrapper[4769]: I1125 09:45:55.212663 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:55Z","lastTransitionTime":"2025-11-25T09:45:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:55 crc kubenswrapper[4769]: I1125 09:45:55.236007 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:45:55 crc kubenswrapper[4769]: I1125 09:45:55.236077 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:45:55 crc kubenswrapper[4769]: I1125 09:45:55.236022 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:45:55 crc kubenswrapper[4769]: E1125 09:45:55.236210 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 09:45:55 crc kubenswrapper[4769]: E1125 09:45:55.236510 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 09:45:55 crc kubenswrapper[4769]: E1125 09:45:55.236682 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 09:45:55 crc kubenswrapper[4769]: I1125 09:45:55.316033 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:55 crc kubenswrapper[4769]: I1125 09:45:55.316116 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:55 crc kubenswrapper[4769]: I1125 09:45:55.316138 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:55 crc kubenswrapper[4769]: I1125 09:45:55.316165 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:55 crc kubenswrapper[4769]: I1125 09:45:55.316183 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:55Z","lastTransitionTime":"2025-11-25T09:45:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.040193 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.040260 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.040273 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.040296 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.040309 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:56Z","lastTransitionTime":"2025-11-25T09:45:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Has your network provider started?"} Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.049893 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/76770e00-0d61-45ae-9772-1e8c42dc6ea6-metrics-certs\") pod \"network-metrics-daemon-7khh9\" (UID: \"76770e00-0d61-45ae-9772-1e8c42dc6ea6\") " pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:45:56 crc kubenswrapper[4769]: E1125 09:45:56.050068 4769 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:45:56 crc kubenswrapper[4769]: E1125 09:45:56.050130 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/76770e00-0d61-45ae-9772-1e8c42dc6ea6-metrics-certs podName:76770e00-0d61-45ae-9772-1e8c42dc6ea6 nodeName:}" failed. No retries permitted until 2025-11-25 09:47:00.050114574 +0000 UTC m=+168.635086887 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/76770e00-0d61-45ae-9772-1e8c42dc6ea6-metrics-certs") pod "network-metrics-daemon-7khh9" (UID: "76770e00-0d61-45ae-9772-1e8c42dc6ea6") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.143628 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.143742 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.143767 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.143842 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.143868 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:56Z","lastTransitionTime":"2025-11-25T09:45:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.236760 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:45:56 crc kubenswrapper[4769]: E1125 09:45:56.237215 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.246207 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.246268 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.246282 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.246307 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.246320 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:56Z","lastTransitionTime":"2025-11-25T09:45:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.349774 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.349884 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.349907 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.350008 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.350037 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:56Z","lastTransitionTime":"2025-11-25T09:45:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.453293 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.453352 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.453362 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.453381 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.453395 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:56Z","lastTransitionTime":"2025-11-25T09:45:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.556554 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.556621 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.556636 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.556659 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.556676 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:56Z","lastTransitionTime":"2025-11-25T09:45:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.660003 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.660068 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.660078 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.660097 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.660112 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:56Z","lastTransitionTime":"2025-11-25T09:45:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.763032 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.763095 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.763110 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.763135 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.763153 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:56Z","lastTransitionTime":"2025-11-25T09:45:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.866938 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.867009 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.867020 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.867040 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.867054 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:56Z","lastTransitionTime":"2025-11-25T09:45:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.970532 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.970586 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.970598 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.970616 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:56 crc kubenswrapper[4769]: I1125 09:45:56.970629 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:56Z","lastTransitionTime":"2025-11-25T09:45:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.074055 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.074132 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.074148 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.074174 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.074194 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:57Z","lastTransitionTime":"2025-11-25T09:45:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.177639 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.177710 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.177729 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.177758 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.177778 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:57Z","lastTransitionTime":"2025-11-25T09:45:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.236706 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.236771 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:45:57 crc kubenswrapper[4769]: E1125 09:45:57.236886 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.237065 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:45:57 crc kubenswrapper[4769]: E1125 09:45:57.237130 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:45:57 crc kubenswrapper[4769]: E1125 09:45:57.237313 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.280983 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.281028 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.281039 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.281058 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.281074 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:57Z","lastTransitionTime":"2025-11-25T09:45:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.384154 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.384201 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.384220 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.384242 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.384253 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:57Z","lastTransitionTime":"2025-11-25T09:45:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.488407 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.488459 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.488475 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.488499 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.488516 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:57Z","lastTransitionTime":"2025-11-25T09:45:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.591480 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.591537 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.591551 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.591617 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.591629 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:57Z","lastTransitionTime":"2025-11-25T09:45:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.694477 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.694525 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.694537 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.694557 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.694568 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:57Z","lastTransitionTime":"2025-11-25T09:45:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.798284 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.798345 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.798358 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.798379 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.798391 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:57Z","lastTransitionTime":"2025-11-25T09:45:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.900951 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.901030 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.901046 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.901067 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:57 crc kubenswrapper[4769]: I1125 09:45:57.901082 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:57Z","lastTransitionTime":"2025-11-25T09:45:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.007325 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.007393 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.007410 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.007439 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.007456 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:58Z","lastTransitionTime":"2025-11-25T09:45:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.110424 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.110475 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.110485 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.110501 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.110511 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:58Z","lastTransitionTime":"2025-11-25T09:45:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.213620 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.213730 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.213749 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.213850 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.213882 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:58Z","lastTransitionTime":"2025-11-25T09:45:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.235891 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:45:58 crc kubenswrapper[4769]: E1125 09:45:58.236151 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.316706 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.316785 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.316810 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.316841 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.316868 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:58Z","lastTransitionTime":"2025-11-25T09:45:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.421307 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.421376 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.421390 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.421410 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.421424 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:58Z","lastTransitionTime":"2025-11-25T09:45:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.525198 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.525244 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.525255 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.525283 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.525294 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:58Z","lastTransitionTime":"2025-11-25T09:45:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.629410 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.629521 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.629555 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.629592 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.629623 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:58Z","lastTransitionTime":"2025-11-25T09:45:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.732522 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.732570 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.732580 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.732598 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.732609 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:58Z","lastTransitionTime":"2025-11-25T09:45:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.835794 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.835834 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.835845 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.835863 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.835872 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:58Z","lastTransitionTime":"2025-11-25T09:45:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.938714 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.938763 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.938775 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.938797 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:58 crc kubenswrapper[4769]: I1125 09:45:58.938810 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:58Z","lastTransitionTime":"2025-11-25T09:45:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.042667 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.042716 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.042730 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.042749 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.042761 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:59Z","lastTransitionTime":"2025-11-25T09:45:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.146304 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.146380 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.146401 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.146429 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.146448 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:59Z","lastTransitionTime":"2025-11-25T09:45:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.236764 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.236891 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:45:59 crc kubenswrapper[4769]: E1125 09:45:59.236955 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.236909 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:45:59 crc kubenswrapper[4769]: E1125 09:45:59.237158 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:45:59 crc kubenswrapper[4769]: E1125 09:45:59.237289 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.249386 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.249447 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.249464 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.249484 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.249497 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:59Z","lastTransitionTime":"2025-11-25T09:45:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.352234 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.352305 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.352321 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.352348 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.352363 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:59Z","lastTransitionTime":"2025-11-25T09:45:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.455804 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.455865 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.455883 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.455942 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.456058 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:59Z","lastTransitionTime":"2025-11-25T09:45:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.559192 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.559235 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.559247 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.559267 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.559281 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:59Z","lastTransitionTime":"2025-11-25T09:45:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.661907 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.662013 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.662027 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.662054 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.662076 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:59Z","lastTransitionTime":"2025-11-25T09:45:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.767475 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.767547 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.767563 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.767588 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.767608 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:59Z","lastTransitionTime":"2025-11-25T09:45:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.870391 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.870475 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.870495 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.870523 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.870544 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:59Z","lastTransitionTime":"2025-11-25T09:45:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.975083 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.975153 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.975167 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.975192 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:45:59 crc kubenswrapper[4769]: I1125 09:45:59.975212 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:45:59Z","lastTransitionTime":"2025-11-25T09:45:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.078766 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.078848 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.078868 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.078895 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.078913 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:46:00Z","lastTransitionTime":"2025-11-25T09:46:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.182580 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.182666 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.182688 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.182717 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.182740 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:46:00Z","lastTransitionTime":"2025-11-25T09:46:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.236839 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:46:00 crc kubenswrapper[4769]: E1125 09:46:00.237156 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.285468 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.285541 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.285555 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.285580 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.285597 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:46:00Z","lastTransitionTime":"2025-11-25T09:46:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.311046 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.311129 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.311154 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.311196 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.311217 4769 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T09:46:00Z","lastTransitionTime":"2025-11-25T09:46:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.387603 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-s87zh"] Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.388429 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-s87zh" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.392055 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.392652 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.392912 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.395295 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.450603 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-kzpxc" podStartSLOduration=83.450576112 podStartE2EDuration="1m23.450576112s" podCreationTimestamp="2025-11-25 09:44:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:00.450000536 +0000 UTC m=+109.034972879" watchObservedRunningTime="2025-11-25 09:46:00.450576112 +0000 UTC m=+109.035548445" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.501587 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7a78c280-d782-4328-ac13-40e3591f4000-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-s87zh\" (UID: \"7a78c280-d782-4328-ac13-40e3591f4000\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-s87zh" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.501630 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7a78c280-d782-4328-ac13-40e3591f4000-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-s87zh\" (UID: \"7a78c280-d782-4328-ac13-40e3591f4000\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-s87zh" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.501762 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/7a78c280-d782-4328-ac13-40e3591f4000-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-s87zh\" (UID: \"7a78c280-d782-4328-ac13-40e3591f4000\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-s87zh" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.501898 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/7a78c280-d782-4328-ac13-40e3591f4000-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-s87zh\" (UID: \"7a78c280-d782-4328-ac13-40e3591f4000\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-s87zh" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.501924 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/7a78c280-d782-4328-ac13-40e3591f4000-service-ca\") pod \"cluster-version-operator-5c965bbfc6-s87zh\" (UID: 
\"7a78c280-d782-4328-ac13-40e3591f4000\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-s87zh" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.515427 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-27dbp" podStartSLOduration=83.515401931 podStartE2EDuration="1m23.515401931s" podCreationTimestamp="2025-11-25 09:44:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:00.482004554 +0000 UTC m=+109.066976867" watchObservedRunningTime="2025-11-25 09:46:00.515401931 +0000 UTC m=+109.100374244" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.548022 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=89.547993626 podStartE2EDuration="1m29.547993626s" podCreationTimestamp="2025-11-25 09:44:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:00.53339526 +0000 UTC m=+109.118367583" watchObservedRunningTime="2025-11-25 09:46:00.547993626 +0000 UTC m=+109.132965939" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.566587 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=55.566561421 podStartE2EDuration="55.566561421s" podCreationTimestamp="2025-11-25 09:45:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:00.548990373 +0000 UTC m=+109.133962726" watchObservedRunningTime="2025-11-25 09:46:00.566561421 +0000 UTC m=+109.151533734" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.587853 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-s47tv" podStartSLOduration=83.587833881 podStartE2EDuration="1m23.587833881s" podCreationTimestamp="2025-11-25 09:44:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:00.566760027 +0000 UTC m=+109.151732360" watchObservedRunningTime="2025-11-25 09:46:00.587833881 +0000 UTC m=+109.172806194" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.602711 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/7a78c280-d782-4328-ac13-40e3591f4000-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-s87zh\" (UID: \"7a78c280-d782-4328-ac13-40e3591f4000\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-s87zh" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.602787 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/7a78c280-d782-4328-ac13-40e3591f4000-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-s87zh\" (UID: \"7a78c280-d782-4328-ac13-40e3591f4000\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-s87zh" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.602806 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/7a78c280-d782-4328-ac13-40e3591f4000-service-ca\") 
pod \"cluster-version-operator-5c965bbfc6-s87zh\" (UID: \"7a78c280-d782-4328-ac13-40e3591f4000\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-s87zh" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.602840 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7a78c280-d782-4328-ac13-40e3591f4000-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-s87zh\" (UID: \"7a78c280-d782-4328-ac13-40e3591f4000\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-s87zh" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.602855 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7a78c280-d782-4328-ac13-40e3591f4000-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-s87zh\" (UID: \"7a78c280-d782-4328-ac13-40e3591f4000\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-s87zh" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.602884 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/7a78c280-d782-4328-ac13-40e3591f4000-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-s87zh\" (UID: \"7a78c280-d782-4328-ac13-40e3591f4000\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-s87zh" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.602934 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/7a78c280-d782-4328-ac13-40e3591f4000-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-s87zh\" (UID: \"7a78c280-d782-4328-ac13-40e3591f4000\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-s87zh" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.604016 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/7a78c280-d782-4328-ac13-40e3591f4000-service-ca\") pod \"cluster-version-operator-5c965bbfc6-s87zh\" (UID: \"7a78c280-d782-4328-ac13-40e3591f4000\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-s87zh" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.610758 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7a78c280-d782-4328-ac13-40e3591f4000-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-s87zh\" (UID: \"7a78c280-d782-4328-ac13-40e3591f4000\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-s87zh" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.614916 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podStartSLOduration=83.614890702 podStartE2EDuration="1m23.614890702s" podCreationTimestamp="2025-11-25 09:44:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:00.6148137 +0000 UTC m=+109.199786013" watchObservedRunningTime="2025-11-25 09:46:00.614890702 +0000 UTC m=+109.199863015" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.615050 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=89.615046257 
podStartE2EDuration="1m29.615046257s" podCreationTimestamp="2025-11-25 09:44:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:00.588396527 +0000 UTC m=+109.173368850" watchObservedRunningTime="2025-11-25 09:46:00.615046257 +0000 UTC m=+109.200018570" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.628781 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7a78c280-d782-4328-ac13-40e3591f4000-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-s87zh\" (UID: \"7a78c280-d782-4328-ac13-40e3591f4000\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-s87zh" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.648123 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wvkdt" podStartSLOduration=82.648107274 podStartE2EDuration="1m22.648107274s" podCreationTimestamp="2025-11-25 09:44:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:00.647786815 +0000 UTC m=+109.232759128" watchObservedRunningTime="2025-11-25 09:46:00.648107274 +0000 UTC m=+109.233079587" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.713669 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-s87zh" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.776949 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-v24zk" podStartSLOduration=83.77692011 podStartE2EDuration="1m23.77692011s" podCreationTimestamp="2025-11-25 09:44:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:00.759222648 +0000 UTC m=+109.344194981" watchObservedRunningTime="2025-11-25 09:46:00.77692011 +0000 UTC m=+109.361892423" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.790111 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=35.790081345 podStartE2EDuration="35.790081345s" podCreationTimestamp="2025-11-25 09:45:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:00.789061997 +0000 UTC m=+109.374034300" watchObservedRunningTime="2025-11-25 09:46:00.790081345 +0000 UTC m=+109.375053678" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.825420 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=10.825396395 podStartE2EDuration="10.825396395s" podCreationTimestamp="2025-11-25 09:45:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:00.82448577 +0000 UTC m=+109.409458073" watchObservedRunningTime="2025-11-25 09:46:00.825396395 +0000 UTC m=+109.410368708" Nov 25 09:46:00 crc kubenswrapper[4769]: I1125 09:46:00.906529 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-s87zh" 
event={"ID":"7a78c280-d782-4328-ac13-40e3591f4000","Type":"ContainerStarted","Data":"c4ce856d284c8ebdd32b690bc28c76afce8a1317e16df38c06e42ad918e043be"} Nov 25 09:46:01 crc kubenswrapper[4769]: I1125 09:46:01.236923 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:46:01 crc kubenswrapper[4769]: I1125 09:46:01.237011 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:46:01 crc kubenswrapper[4769]: E1125 09:46:01.237154 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:46:01 crc kubenswrapper[4769]: I1125 09:46:01.237206 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:46:01 crc kubenswrapper[4769]: E1125 09:46:01.237876 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:46:01 crc kubenswrapper[4769]: E1125 09:46:01.237959 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:46:01 crc kubenswrapper[4769]: I1125 09:46:01.238402 4769 scope.go:117] "RemoveContainer" containerID="c5d5017da58d670189a8129172ef3a6d333e47c6ef6cf78ca9f1392dec54530f" Nov 25 09:46:01 crc kubenswrapper[4769]: E1125 09:46:01.238667 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-kfvzs_openshift-ovn-kubernetes(069c06c6-fe60-41d0-b96d-86606f55b258)\"" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" Nov 25 09:46:01 crc kubenswrapper[4769]: I1125 09:46:01.913650 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-s87zh" event={"ID":"7a78c280-d782-4328-ac13-40e3591f4000","Type":"ContainerStarted","Data":"eb234c774087d3b1153fd0f38fca00eadde4dc56059b84416c25956782bcf987"} Nov 25 09:46:02 crc kubenswrapper[4769]: I1125 09:46:02.236517 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:46:02 crc kubenswrapper[4769]: E1125 09:46:02.237525 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:46:03 crc kubenswrapper[4769]: I1125 09:46:03.236190 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:46:03 crc kubenswrapper[4769]: E1125 09:46:03.236397 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:46:03 crc kubenswrapper[4769]: I1125 09:46:03.236224 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:46:03 crc kubenswrapper[4769]: E1125 09:46:03.236524 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:46:03 crc kubenswrapper[4769]: I1125 09:46:03.237126 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:46:03 crc kubenswrapper[4769]: E1125 09:46:03.237268 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:46:04 crc kubenswrapper[4769]: I1125 09:46:04.236488 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:46:04 crc kubenswrapper[4769]: E1125 09:46:04.236779 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:46:05 crc kubenswrapper[4769]: I1125 09:46:05.236673 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:46:05 crc kubenswrapper[4769]: I1125 09:46:05.236718 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:46:05 crc kubenswrapper[4769]: I1125 09:46:05.236782 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:46:05 crc kubenswrapper[4769]: E1125 09:46:05.237203 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:46:05 crc kubenswrapper[4769]: E1125 09:46:05.237539 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:46:05 crc kubenswrapper[4769]: E1125 09:46:05.237734 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:46:06 crc kubenswrapper[4769]: I1125 09:46:06.237035 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:46:06 crc kubenswrapper[4769]: E1125 09:46:06.238390 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:46:07 crc kubenswrapper[4769]: I1125 09:46:07.236186 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:46:07 crc kubenswrapper[4769]: I1125 09:46:07.236304 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:46:07 crc kubenswrapper[4769]: I1125 09:46:07.236353 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:46:07 crc kubenswrapper[4769]: E1125 09:46:07.236538 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:46:07 crc kubenswrapper[4769]: E1125 09:46:07.236684 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:46:07 crc kubenswrapper[4769]: E1125 09:46:07.236845 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:46:08 crc kubenswrapper[4769]: I1125 09:46:08.236750 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:46:08 crc kubenswrapper[4769]: E1125 09:46:08.236991 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:46:09 crc kubenswrapper[4769]: I1125 09:46:09.236878 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:46:09 crc kubenswrapper[4769]: I1125 09:46:09.236919 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:46:09 crc kubenswrapper[4769]: I1125 09:46:09.237013 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:46:09 crc kubenswrapper[4769]: E1125 09:46:09.237101 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:46:09 crc kubenswrapper[4769]: E1125 09:46:09.237371 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:46:09 crc kubenswrapper[4769]: E1125 09:46:09.237483 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:46:10 crc kubenswrapper[4769]: I1125 09:46:10.236517 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:46:10 crc kubenswrapper[4769]: E1125 09:46:10.237147 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:46:11 crc kubenswrapper[4769]: I1125 09:46:11.236842 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:46:11 crc kubenswrapper[4769]: I1125 09:46:11.236924 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:46:11 crc kubenswrapper[4769]: I1125 09:46:11.237072 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:46:11 crc kubenswrapper[4769]: E1125 09:46:11.237265 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:46:11 crc kubenswrapper[4769]: E1125 09:46:11.237507 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:46:11 crc kubenswrapper[4769]: E1125 09:46:11.237594 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:46:12 crc kubenswrapper[4769]: I1125 09:46:12.236396 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:46:12 crc kubenswrapper[4769]: E1125 09:46:12.237629 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:46:12 crc kubenswrapper[4769]: I1125 09:46:12.238636 4769 scope.go:117] "RemoveContainer" containerID="c5d5017da58d670189a8129172ef3a6d333e47c6ef6cf78ca9f1392dec54530f" Nov 25 09:46:12 crc kubenswrapper[4769]: E1125 09:46:12.238924 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-kfvzs_openshift-ovn-kubernetes(069c06c6-fe60-41d0-b96d-86606f55b258)\"" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" Nov 25 09:46:12 crc kubenswrapper[4769]: E1125 09:46:12.255269 4769 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Nov 25 09:46:12 crc kubenswrapper[4769]: E1125 09:46:12.351948 4769 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Nov 25 09:46:12 crc kubenswrapper[4769]: I1125 09:46:12.954101 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-s47tv_025219f0-bc69-4a33-acaa-b055607272bb/kube-multus/1.log" Nov 25 09:46:12 crc kubenswrapper[4769]: I1125 09:46:12.954799 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-s47tv_025219f0-bc69-4a33-acaa-b055607272bb/kube-multus/0.log" Nov 25 09:46:12 crc kubenswrapper[4769]: I1125 09:46:12.954852 4769 generic.go:334] "Generic (PLEG): container finished" podID="025219f0-bc69-4a33-acaa-b055607272bb" containerID="9552f354a6fdea64276d57f0df1ea99659a4cb438db1b42a3d9c9a97108c0c87" exitCode=1 Nov 25 09:46:12 crc kubenswrapper[4769]: I1125 09:46:12.954895 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-s47tv" event={"ID":"025219f0-bc69-4a33-acaa-b055607272bb","Type":"ContainerDied","Data":"9552f354a6fdea64276d57f0df1ea99659a4cb438db1b42a3d9c9a97108c0c87"} Nov 25 09:46:12 crc kubenswrapper[4769]: I1125 09:46:12.954950 4769 scope.go:117] "RemoveContainer" containerID="b8283067913fddd4b3d58c8dc0cc7000d482d929bc6a1d4fb501bfa8c0fa9525" Nov 25 09:46:12 crc kubenswrapper[4769]: I1125 09:46:12.955370 4769 scope.go:117] "RemoveContainer" containerID="9552f354a6fdea64276d57f0df1ea99659a4cb438db1b42a3d9c9a97108c0c87" Nov 25 09:46:12 crc kubenswrapper[4769]: E1125 09:46:12.955658 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-s47tv_openshift-multus(025219f0-bc69-4a33-acaa-b055607272bb)\"" pod="openshift-multus/multus-s47tv" podUID="025219f0-bc69-4a33-acaa-b055607272bb" Nov 25 09:46:12 crc kubenswrapper[4769]: I1125 09:46:12.975216 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-s87zh" podStartSLOduration=95.975196301 podStartE2EDuration="1m35.975196301s" podCreationTimestamp="2025-11-25 09:44:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:01.934475088 +0000 UTC m=+110.519447501" watchObservedRunningTime="2025-11-25 09:46:12.975196301 +0000 UTC m=+121.560168614" Nov 25 09:46:13 crc kubenswrapper[4769]: I1125 09:46:13.236991 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:46:13 crc kubenswrapper[4769]: I1125 09:46:13.237013 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:46:13 crc kubenswrapper[4769]: I1125 09:46:13.237024 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:46:13 crc kubenswrapper[4769]: E1125 09:46:13.237171 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:46:13 crc kubenswrapper[4769]: E1125 09:46:13.237392 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:46:13 crc kubenswrapper[4769]: E1125 09:46:13.237507 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:46:13 crc kubenswrapper[4769]: I1125 09:46:13.961803 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-s47tv_025219f0-bc69-4a33-acaa-b055607272bb/kube-multus/1.log" Nov 25 09:46:14 crc kubenswrapper[4769]: I1125 09:46:14.236468 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:46:14 crc kubenswrapper[4769]: E1125 09:46:14.236738 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:46:15 crc kubenswrapper[4769]: I1125 09:46:15.236504 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:46:15 crc kubenswrapper[4769]: I1125 09:46:15.236592 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:46:15 crc kubenswrapper[4769]: I1125 09:46:15.236649 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:46:15 crc kubenswrapper[4769]: E1125 09:46:15.236739 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:46:15 crc kubenswrapper[4769]: E1125 09:46:15.237018 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:46:15 crc kubenswrapper[4769]: E1125 09:46:15.237271 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:46:16 crc kubenswrapper[4769]: I1125 09:46:16.236624 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:46:16 crc kubenswrapper[4769]: E1125 09:46:16.236830 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:46:17 crc kubenswrapper[4769]: I1125 09:46:17.236143 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:46:17 crc kubenswrapper[4769]: E1125 09:46:17.236329 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:46:17 crc kubenswrapper[4769]: I1125 09:46:17.236330 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:46:17 crc kubenswrapper[4769]: I1125 09:46:17.236374 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:46:17 crc kubenswrapper[4769]: E1125 09:46:17.236424 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:46:17 crc kubenswrapper[4769]: E1125 09:46:17.236562 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:46:17 crc kubenswrapper[4769]: E1125 09:46:17.353362 4769 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 25 09:46:18 crc kubenswrapper[4769]: I1125 09:46:18.237143 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:46:18 crc kubenswrapper[4769]: E1125 09:46:18.237719 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:46:19 crc kubenswrapper[4769]: I1125 09:46:19.236485 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:46:19 crc kubenswrapper[4769]: E1125 09:46:19.236678 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:46:19 crc kubenswrapper[4769]: I1125 09:46:19.236854 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:46:19 crc kubenswrapper[4769]: I1125 09:46:19.236875 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:46:19 crc kubenswrapper[4769]: E1125 09:46:19.237164 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:46:19 crc kubenswrapper[4769]: E1125 09:46:19.237229 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:46:20 crc kubenswrapper[4769]: I1125 09:46:20.235924 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:46:20 crc kubenswrapper[4769]: E1125 09:46:20.236207 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:46:21 crc kubenswrapper[4769]: I1125 09:46:21.236677 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:46:21 crc kubenswrapper[4769]: I1125 09:46:21.236729 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:46:21 crc kubenswrapper[4769]: I1125 09:46:21.236831 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:46:21 crc kubenswrapper[4769]: E1125 09:46:21.236883 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:46:21 crc kubenswrapper[4769]: E1125 09:46:21.237091 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:46:21 crc kubenswrapper[4769]: E1125 09:46:21.237234 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:46:22 crc kubenswrapper[4769]: I1125 09:46:22.236766 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:46:22 crc kubenswrapper[4769]: E1125 09:46:22.238145 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:46:22 crc kubenswrapper[4769]: E1125 09:46:22.354019 4769 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" Nov 25 09:46:23 crc kubenswrapper[4769]: I1125 09:46:23.237198 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:46:23 crc kubenswrapper[4769]: I1125 09:46:23.237258 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:46:23 crc kubenswrapper[4769]: E1125 09:46:23.237457 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:46:23 crc kubenswrapper[4769]: I1125 09:46:23.237537 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:46:23 crc kubenswrapper[4769]: E1125 09:46:23.237702 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:46:23 crc kubenswrapper[4769]: E1125 09:46:23.237873 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:46:24 crc kubenswrapper[4769]: I1125 09:46:24.236857 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:46:24 crc kubenswrapper[4769]: I1125 09:46:24.237429 4769 scope.go:117] "RemoveContainer" containerID="9552f354a6fdea64276d57f0df1ea99659a4cb438db1b42a3d9c9a97108c0c87" Nov 25 09:46:24 crc kubenswrapper[4769]: E1125 09:46:24.237673 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:46:24 crc kubenswrapper[4769]: I1125 09:46:24.239600 4769 scope.go:117] "RemoveContainer" containerID="c5d5017da58d670189a8129172ef3a6d333e47c6ef6cf78ca9f1392dec54530f" Nov 25 09:46:25 crc kubenswrapper[4769]: I1125 09:46:25.005687 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-kfvzs_069c06c6-fe60-41d0-b96d-86606f55b258/ovnkube-controller/3.log" Nov 25 09:46:25 crc kubenswrapper[4769]: I1125 09:46:25.008323 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" event={"ID":"069c06c6-fe60-41d0-b96d-86606f55b258","Type":"ContainerStarted","Data":"7f68699e162f5cdc55b3885a3e3dd00c7ef78c394d3aff1ad154563d9d00fe98"} Nov 25 09:46:25 crc kubenswrapper[4769]: I1125 09:46:25.008779 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:46:25 crc kubenswrapper[4769]: I1125 09:46:25.009989 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-s47tv_025219f0-bc69-4a33-acaa-b055607272bb/kube-multus/1.log" Nov 25 09:46:25 crc kubenswrapper[4769]: I1125 09:46:25.010036 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-s47tv" event={"ID":"025219f0-bc69-4a33-acaa-b055607272bb","Type":"ContainerStarted","Data":"cc82a4e087ecb46a40d5a0406ed6677bd0f4e3c88a444882e56d3e955ab8749a"} Nov 25 09:46:25 crc kubenswrapper[4769]: I1125 09:46:25.037321 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" podStartSLOduration=108.037295884 podStartE2EDuration="1m48.037295884s" podCreationTimestamp="2025-11-25 09:44:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:25.036815905 +0000 UTC m=+133.621788228" watchObservedRunningTime="2025-11-25 09:46:25.037295884 +0000 UTC m=+133.622268187" Nov 25 09:46:25 crc kubenswrapper[4769]: I1125 09:46:25.226847 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-7khh9"] Nov 25 09:46:25 crc kubenswrapper[4769]: I1125 09:46:25.227020 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:46:25 crc kubenswrapper[4769]: E1125 09:46:25.227150 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:46:25 crc kubenswrapper[4769]: I1125 09:46:25.236198 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:46:25 crc kubenswrapper[4769]: I1125 09:46:25.236228 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:46:25 crc kubenswrapper[4769]: I1125 09:46:25.236403 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:46:25 crc kubenswrapper[4769]: E1125 09:46:25.236395 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:46:25 crc kubenswrapper[4769]: E1125 09:46:25.236522 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:46:25 crc kubenswrapper[4769]: E1125 09:46:25.236601 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:46:27 crc kubenswrapper[4769]: I1125 09:46:27.236673 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:46:27 crc kubenswrapper[4769]: I1125 09:46:27.236718 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:46:27 crc kubenswrapper[4769]: E1125 09:46:27.237607 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:46:27 crc kubenswrapper[4769]: I1125 09:46:27.236776 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:46:27 crc kubenswrapper[4769]: E1125 09:46:27.237782 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:46:27 crc kubenswrapper[4769]: E1125 09:46:27.238027 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:46:27 crc kubenswrapper[4769]: I1125 09:46:27.236723 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:46:27 crc kubenswrapper[4769]: E1125 09:46:27.238352 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:46:27 crc kubenswrapper[4769]: E1125 09:46:27.355949 4769 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 25 09:46:29 crc kubenswrapper[4769]: I1125 09:46:29.236552 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:46:29 crc kubenswrapper[4769]: I1125 09:46:29.236684 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:46:29 crc kubenswrapper[4769]: E1125 09:46:29.236766 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:46:29 crc kubenswrapper[4769]: I1125 09:46:29.236789 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:46:29 crc kubenswrapper[4769]: I1125 09:46:29.236857 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:46:29 crc kubenswrapper[4769]: E1125 09:46:29.237064 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:46:29 crc kubenswrapper[4769]: E1125 09:46:29.237154 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:46:29 crc kubenswrapper[4769]: E1125 09:46:29.237251 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:46:31 crc kubenswrapper[4769]: I1125 09:46:31.236860 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:46:31 crc kubenswrapper[4769]: E1125 09:46:31.237177 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-7khh9" podUID="76770e00-0d61-45ae-9772-1e8c42dc6ea6" Nov 25 09:46:31 crc kubenswrapper[4769]: I1125 09:46:31.236852 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:46:31 crc kubenswrapper[4769]: E1125 09:46:31.237260 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 09:46:31 crc kubenswrapper[4769]: I1125 09:46:31.236881 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:46:31 crc kubenswrapper[4769]: E1125 09:46:31.237309 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 09:46:31 crc kubenswrapper[4769]: I1125 09:46:31.236852 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:46:31 crc kubenswrapper[4769]: E1125 09:46:31.237354 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 09:46:33 crc kubenswrapper[4769]: I1125 09:46:33.236568 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:46:33 crc kubenswrapper[4769]: I1125 09:46:33.236764 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:46:33 crc kubenswrapper[4769]: I1125 09:46:33.236953 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:46:33 crc kubenswrapper[4769]: I1125 09:46:33.237134 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:46:33 crc kubenswrapper[4769]: I1125 09:46:33.239796 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 25 09:46:33 crc kubenswrapper[4769]: I1125 09:46:33.240047 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 25 09:46:33 crc kubenswrapper[4769]: I1125 09:46:33.242913 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 25 09:46:33 crc kubenswrapper[4769]: I1125 09:46:33.242928 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 25 09:46:33 crc kubenswrapper[4769]: I1125 09:46:33.242952 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 25 09:46:33 crc kubenswrapper[4769]: I1125 09:46:33.243058 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 25 09:46:39 crc kubenswrapper[4769]: I1125 09:46:39.051802 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:46:39 crc kubenswrapper[4769]: I1125 09:46:39.060228 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 09:46:39 crc kubenswrapper[4769]: I1125 09:46:39.153778 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:46:39 crc kubenswrapper[4769]: I1125 09:46:39.153947 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 09:46:39 crc 
Nov 25 09:46:39 crc kubenswrapper[4769]: E1125 09:46:39.154164 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:48:41.154111415 +0000 UTC m=+269.739083758 (durationBeforeRetry 2m2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:46:39 crc kubenswrapper[4769]: I1125 09:46:39.154342 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:46:39 crc kubenswrapper[4769]: I1125 09:46:39.157775 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:46:39 crc kubenswrapper[4769]: I1125 09:46:39.158087 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:46:39 crc kubenswrapper[4769]: I1125 09:46:39.208738 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:46:39 crc kubenswrapper[4769]: I1125 09:46:39.258282 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 09:46:39 crc kubenswrapper[4769]: I1125 09:46:39.270502 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 09:46:39 crc kubenswrapper[4769]: I1125 09:46:39.279632 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:46:39 crc kubenswrapper[4769]: W1125 09:46:39.613154 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b6479f0_333b_4a96_9adf_2099afdc2447.slice/crio-95e3d0649d0861d7235823b475913ffaef8af0699173d4693f90625dd7c2fb80 WatchSource:0}: Error finding container 95e3d0649d0861d7235823b475913ffaef8af0699173d4693f90625dd7c2fb80: Status 404 returned error can't find the container with id 95e3d0649d0861d7235823b475913ffaef8af0699173d4693f90625dd7c2fb80
Nov 25 09:46:39 crc kubenswrapper[4769]: W1125 09:46:39.621423 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d751cbb_f2e2_430d_9754_c882a5e924a5.slice/crio-befd18afe2afaa9111562a12c098ffd9d8becc2e62fb0c4252c420ec577884e1 WatchSource:0}: Error finding container befd18afe2afaa9111562a12c098ffd9d8becc2e62fb0c4252c420ec577884e1: Status 404 returned error can't find the container with id befd18afe2afaa9111562a12c098ffd9d8becc2e62fb0c4252c420ec577884e1
Nov 25 09:46:40 crc kubenswrapper[4769]: I1125 09:46:40.076134 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"b20623acde142189627f914df86eb401f90ac75e0a0fcd97ee696e8f03a07bd1"}
Nov 25 09:46:40 crc kubenswrapper[4769]: I1125 09:46:40.076516 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"0c2aa2aacda014a1d399a2c3599cd86319a48166d47d023f0b8936e2b9089c31"}
Nov 25 09:46:40 crc kubenswrapper[4769]: I1125 09:46:40.077852 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"89117a5c9bb8451da69d98d43817526c32dd425068476595fefb1d0ac730350a"}
Nov 25 09:46:40 crc kubenswrapper[4769]: I1125 09:46:40.077904 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"95e3d0649d0861d7235823b475913ffaef8af0699173d4693f90625dd7c2fb80"}
Nov 25 09:46:40 crc kubenswrapper[4769]: I1125 09:46:40.078149 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 09:46:40 crc kubenswrapper[4769]: I1125 09:46:40.080099 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"054427de1189f7a5a515a9b2d9cc1b46ecfece13bffa39aae6e0cd0c24711818"}
Nov 25 09:46:40 crc kubenswrapper[4769]: I1125 09:46:40.080153 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"befd18afe2afaa9111562a12c098ffd9d8becc2e62fb0c4252c420ec577884e1"}
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.087435 4769 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady"
event="NodeReady" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.130087 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-bd82s"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.130755 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t7rc7"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.131181 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t7rc7" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.131702 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-bd82s" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.134082 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.135942 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-ghgwj"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.136555 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-ghgwj" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.136720 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-w82wc"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.138026 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-g8wkd"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.138202 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w82wc" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.138453 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-twb6l"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.139099 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-g8wkd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.139166 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-twb6l" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.140548 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.141148 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.142273 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-zjqhj"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.142462 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.142915 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-zjqhj" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.144485 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.145201 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.145760 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-cv8sw"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.152123 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.152502 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-gb44m"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.152817 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nk6zl"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.153071 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-xg47p"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.153319 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-l2kg2"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.153529 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-s289l"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.153820 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-q8hkd"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.154055 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-shvhw"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.154443 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-p57pg"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.154870 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-p57pg" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.155348 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-cv8sw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.152526 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.155694 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-gb44m" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.155918 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nk6zl" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.156184 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-xg47p" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.156429 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-l2kg2" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.156624 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-s289l" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.156829 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.157122 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-shvhw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.152593 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.161768 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-tn4zj"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.162500 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-tn4zj" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.163951 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-6dlmm"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.164352 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-6dlmm" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.172767 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.173264 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.175014 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.175211 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.176145 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.176394 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.176716 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.152899 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.185752 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.200568 4769 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.200927 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.201083 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.201159 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.201260 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.201319 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.201120 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.201464 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-8ft75"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.201524 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.201674 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.201755 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.201885 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.202162 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.202325 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.202347 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8ft75" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.202523 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.202734 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-f8mb8"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.203997 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.204127 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.204271 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.204493 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-f8mb8" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.205285 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.205637 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.205826 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.206026 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.206318 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.206733 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-vp9pc"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.207384 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-vp9pc" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.206738 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.207860 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.207954 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.206783 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.208202 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.208296 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.206843 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.206902 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.206918 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.206946 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.208643 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.207026 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.207109 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.207179 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.207205 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.207229 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.207262 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.207315 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 25 09:46:41 crc 
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.207419 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.207480 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.207499 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.207533 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.207570 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.207632 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.207687 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.207786 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.209866 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.211243 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.211360 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.211448 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.211541 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-mvk5j"]
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.211785 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.212391 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-mvk5j"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.212607 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-zs9d4"]
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.212685 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.212795 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.212917 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.213112 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.213202 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.213221 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.213265 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.213282 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-zs9d4"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.213306 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.213356 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.213374 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.213393 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.213446 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.213458 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.213476 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.213559 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.213603 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.213668 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.213745 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.213968 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.214085 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.214249 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.214410 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.215472 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-4w9xd"]
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.216263 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-4w9xd"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.219132 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.219307 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.219673 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.234271 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wcj44"]
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.243653 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-xrd6w"]
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.243799 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wcj44"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.246472 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.269141 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-v4q28"]
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.271378 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-qz6sr"]
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.272142 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.273533 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.274265 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.275153 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-v4q28"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.275352 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.275427 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-bd82s"]
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.275635 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-qz6sr"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.275660 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.275885 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.280019 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.280716 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-qthpd"]
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.281410 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-qthpd"
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-qthpd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.281878 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p9xps\" (UniqueName: \"kubernetes.io/projected/8b74c1fc-0aa2-4c74-80a1-3743defdbe7a-kube-api-access-p9xps\") pod \"ingress-operator-5b745b69d9-6dlmm\" (UID: \"8b74c1fc-0aa2-4c74-80a1-3743defdbe7a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-6dlmm" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.281931 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-q8hkd\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.281982 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-q8hkd\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.282010 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3ad7cc35-130e-4404-8a4f-173884ab4e41-serving-cert\") pod \"apiserver-7bbb656c7d-w82wc\" (UID: \"3ad7cc35-130e-4404-8a4f-173884ab4e41\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w82wc" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.282032 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-q8hkd\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.282055 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/3ad7cc35-130e-4404-8a4f-173884ab4e41-etcd-client\") pod \"apiserver-7bbb656c7d-w82wc\" (UID: \"3ad7cc35-130e-4404-8a4f-173884ab4e41\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w82wc" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.282074 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4-service-ca\") pod \"console-f9d7485db-ghgwj\" (UID: \"2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4\") " pod="openshift-console/console-f9d7485db-ghgwj" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.282095 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dqzfl\" (UniqueName: \"kubernetes.io/projected/da8fd0f6-4f81-4bd2-8c8a-3d62ac19e209-kube-api-access-dqzfl\") pod \"openshift-controller-manager-operator-756b6f6bc6-nk6zl\" (UID: 
\"da8fd0f6-4f81-4bd2-8c8a-3d62ac19e209\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nk6zl" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.282116 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wlprx\" (UniqueName: \"kubernetes.io/projected/08c157ad-8d10-4284-a4e4-eb0b55a56542-kube-api-access-wlprx\") pod \"apiserver-76f77b778f-shvhw\" (UID: \"08c157ad-8d10-4284-a4e4-eb0b55a56542\") " pod="openshift-apiserver/apiserver-76f77b778f-shvhw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.282179 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2bsw6\" (UniqueName: \"kubernetes.io/projected/b5cfa364-a70a-4a62-a3ec-05285661a031-kube-api-access-2bsw6\") pod \"machine-approver-56656f9798-gb44m\" (UID: \"b5cfa364-a70a-4a62-a3ec-05285661a031\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-gb44m" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.282219 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/08c157ad-8d10-4284-a4e4-eb0b55a56542-serving-cert\") pod \"apiserver-76f77b778f-shvhw\" (UID: \"08c157ad-8d10-4284-a4e4-eb0b55a56542\") " pod="openshift-apiserver/apiserver-76f77b778f-shvhw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.282260 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/08c157ad-8d10-4284-a4e4-eb0b55a56542-image-import-ca\") pod \"apiserver-76f77b778f-shvhw\" (UID: \"08c157ad-8d10-4284-a4e4-eb0b55a56542\") " pod="openshift-apiserver/apiserver-76f77b778f-shvhw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.282324 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/73d814ac-d3f7-4ed1-a37c-cfb7f988e88c-trusted-ca\") pod \"console-operator-58897d9998-xg47p\" (UID: \"73d814ac-d3f7-4ed1-a37c-cfb7f988e88c\") " pod="openshift-console-operator/console-operator-58897d9998-xg47p" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.282353 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4-console-config\") pod \"console-f9d7485db-ghgwj\" (UID: \"2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4\") " pod="openshift-console/console-f9d7485db-ghgwj" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.282370 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n4ddz\" (UniqueName: \"kubernetes.io/projected/162f7ecc-6c53-4f08-955e-3ad0e5a7186e-kube-api-access-n4ddz\") pod \"route-controller-manager-6576b87f9c-cv8sw\" (UID: \"162f7ecc-6c53-4f08-955e-3ad0e5a7186e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-cv8sw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.282402 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/d73449ca-6268-455c-a1b5-79e0c6c79779-available-featuregates\") pod \"openshift-config-operator-7777fb866f-bd82s\" (UID: \"d73449ca-6268-455c-a1b5-79e0c6c79779\") " 
pod="openshift-config-operator/openshift-config-operator-7777fb866f-bd82s" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.282426 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/446806b9-7941-43d7-885c-61c1d577811d-serving-cert\") pod \"controller-manager-879f6c89f-zjqhj\" (UID: \"446806b9-7941-43d7-885c-61c1d577811d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zjqhj" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.282465 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qkplg\" (UniqueName: \"kubernetes.io/projected/afd3a02c-c392-4d05-8152-009157a52bf2-kube-api-access-qkplg\") pod \"cluster-image-registry-operator-dc59b4c8b-t7rc7\" (UID: \"afd3a02c-c392-4d05-8152-009157a52bf2\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t7rc7" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.282496 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n2k7c\" (UniqueName: \"kubernetes.io/projected/73d814ac-d3f7-4ed1-a37c-cfb7f988e88c-kube-api-access-n2k7c\") pod \"console-operator-58897d9998-xg47p\" (UID: \"73d814ac-d3f7-4ed1-a37c-cfb7f988e88c\") " pod="openshift-console-operator/console-operator-58897d9998-xg47p" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.282543 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5cfa364-a70a-4a62-a3ec-05285661a031-config\") pod \"machine-approver-56656f9798-gb44m\" (UID: \"b5cfa364-a70a-4a62-a3ec-05285661a031\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-gb44m" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.282567 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-q8hkd\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.282591 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-q8hkd\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.282616 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-q8hkd\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.282759 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-user-idp-0-file-data\") pod 
\"oauth-openshift-558db77b4-q8hkd\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.282782 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4-trusted-ca-bundle\") pod \"console-f9d7485db-ghgwj\" (UID: \"2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4\") " pod="openshift-console/console-f9d7485db-ghgwj" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.282801 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8b74c1fc-0aa2-4c74-80a1-3743defdbe7a-bound-sa-token\") pod \"ingress-operator-5b745b69d9-6dlmm\" (UID: \"8b74c1fc-0aa2-4c74-80a1-3743defdbe7a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-6dlmm" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.282820 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/afd3a02c-c392-4d05-8152-009157a52bf2-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-t7rc7\" (UID: \"afd3a02c-c392-4d05-8152-009157a52bf2\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t7rc7" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.282841 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3ad7cc35-130e-4404-8a4f-173884ab4e41-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-w82wc\" (UID: \"3ad7cc35-130e-4404-8a4f-173884ab4e41\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w82wc" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.282860 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6pmht\" (UniqueName: \"kubernetes.io/projected/c7f5c01f-4820-4168-a19b-761a9e56650b-kube-api-access-6pmht\") pod \"openshift-apiserver-operator-796bbdcf4f-g8wkd\" (UID: \"c7f5c01f-4820-4168-a19b-761a9e56650b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-g8wkd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.282880 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/162f7ecc-6c53-4f08-955e-3ad0e5a7186e-config\") pod \"route-controller-manager-6576b87f9c-cv8sw\" (UID: \"162f7ecc-6c53-4f08-955e-3ad0e5a7186e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-cv8sw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.282899 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/08c157ad-8d10-4284-a4e4-eb0b55a56542-config\") pod \"apiserver-76f77b778f-shvhw\" (UID: \"08c157ad-8d10-4284-a4e4-eb0b55a56542\") " pod="openshift-apiserver/apiserver-76f77b778f-shvhw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.282918 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/b5cfa364-a70a-4a62-a3ec-05285661a031-auth-proxy-config\") pod \"machine-approver-56656f9798-gb44m\" (UID: 
\"b5cfa364-a70a-4a62-a3ec-05285661a031\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-gb44m" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.282982 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-95jrj\" (UniqueName: \"kubernetes.io/projected/99e5c770-c7b9-496d-93c7-bd8151e1bd49-kube-api-access-95jrj\") pod \"dns-operator-744455d44c-s289l\" (UID: \"99e5c770-c7b9-496d-93c7-bd8151e1bd49\") " pod="openshift-dns-operator/dns-operator-744455d44c-s289l" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.283013 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/446806b9-7941-43d7-885c-61c1d577811d-client-ca\") pod \"controller-manager-879f6c89f-zjqhj\" (UID: \"446806b9-7941-43d7-885c-61c1d577811d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zjqhj" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.283037 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/00b22ed8-e9b5-4173-8574-b06254cd0965-audit-dir\") pod \"oauth-openshift-558db77b4-q8hkd\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.283059 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/3ad7cc35-130e-4404-8a4f-173884ab4e41-audit-policies\") pod \"apiserver-7bbb656c7d-w82wc\" (UID: \"3ad7cc35-130e-4404-8a4f-173884ab4e41\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w82wc" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.283077 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/3ad7cc35-130e-4404-8a4f-173884ab4e41-encryption-config\") pod \"apiserver-7bbb656c7d-w82wc\" (UID: \"3ad7cc35-130e-4404-8a4f-173884ab4e41\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w82wc" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.283094 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/99e5c770-c7b9-496d-93c7-bd8151e1bd49-metrics-tls\") pod \"dns-operator-744455d44c-s289l\" (UID: \"99e5c770-c7b9-496d-93c7-bd8151e1bd49\") " pod="openshift-dns-operator/dns-operator-744455d44c-s289l" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.283123 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/446806b9-7941-43d7-885c-61c1d577811d-config\") pod \"controller-manager-879f6c89f-zjqhj\" (UID: \"446806b9-7941-43d7-885c-61c1d577811d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zjqhj" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.283142 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/8b74c1fc-0aa2-4c74-80a1-3743defdbe7a-metrics-tls\") pod \"ingress-operator-5b745b69d9-6dlmm\" (UID: \"8b74c1fc-0aa2-4c74-80a1-3743defdbe7a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-6dlmm" Nov 25 09:46:41 crc kubenswrapper[4769]: 
I1125 09:46:41.283163 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b5c763b0-7d68-4d32-871e-7e58ac32f03f-service-ca-bundle\") pod \"authentication-operator-69f744f599-tn4zj\" (UID: \"b5c763b0-7d68-4d32-871e-7e58ac32f03f\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-tn4zj" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.283182 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4djqh\" (UniqueName: \"kubernetes.io/projected/00b22ed8-e9b5-4173-8574-b06254cd0965-kube-api-access-4djqh\") pod \"oauth-openshift-558db77b4-q8hkd\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.283227 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/73d814ac-d3f7-4ed1-a37c-cfb7f988e88c-config\") pod \"console-operator-58897d9998-xg47p\" (UID: \"73d814ac-d3f7-4ed1-a37c-cfb7f988e88c\") " pod="openshift-console-operator/console-operator-58897d9998-xg47p" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.283246 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/73d814ac-d3f7-4ed1-a37c-cfb7f988e88c-serving-cert\") pod \"console-operator-58897d9998-xg47p\" (UID: \"73d814ac-d3f7-4ed1-a37c-cfb7f988e88c\") " pod="openshift-console-operator/console-operator-58897d9998-xg47p" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.283266 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/5351ce01-8233-4a92-b15d-befd4e57b0d2-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-twb6l\" (UID: \"5351ce01-8233-4a92-b15d-befd4e57b0d2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-twb6l" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.283292 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/6c5b8b29-e065-4587-88d6-cd93b56a1657-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-p57pg\" (UID: \"6c5b8b29-e065-4587-88d6-cd93b56a1657\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-p57pg" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.283354 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4-console-serving-cert\") pod \"console-f9d7485db-ghgwj\" (UID: \"2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4\") " pod="openshift-console/console-f9d7485db-ghgwj" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.283447 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cs482\" (UniqueName: \"kubernetes.io/projected/d73449ca-6268-455c-a1b5-79e0c6c79779-kube-api-access-cs482\") pod \"openshift-config-operator-7777fb866f-bd82s\" (UID: \"d73449ca-6268-455c-a1b5-79e0c6c79779\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-bd82s" Nov 25 09:46:41 crc 
kubenswrapper[4769]: I1125 09:46:41.283470 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/08c157ad-8d10-4284-a4e4-eb0b55a56542-etcd-client\") pod \"apiserver-76f77b778f-shvhw\" (UID: \"08c157ad-8d10-4284-a4e4-eb0b55a56542\") " pod="openshift-apiserver/apiserver-76f77b778f-shvhw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.283515 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3ad7cc35-130e-4404-8a4f-173884ab4e41-audit-dir\") pod \"apiserver-7bbb656c7d-w82wc\" (UID: \"3ad7cc35-130e-4404-8a4f-173884ab4e41\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w82wc" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.283551 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8b74c1fc-0aa2-4c74-80a1-3743defdbe7a-trusted-ca\") pod \"ingress-operator-5b745b69d9-6dlmm\" (UID: \"8b74c1fc-0aa2-4c74-80a1-3743defdbe7a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-6dlmm" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.283577 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/446806b9-7941-43d7-885c-61c1d577811d-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-zjqhj\" (UID: \"446806b9-7941-43d7-885c-61c1d577811d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zjqhj" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.283598 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mg58z\" (UniqueName: \"kubernetes.io/projected/446806b9-7941-43d7-885c-61c1d577811d-kube-api-access-mg58z\") pod \"controller-manager-879f6c89f-zjqhj\" (UID: \"446806b9-7941-43d7-885c-61c1d577811d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zjqhj" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.283633 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-568dv\" (UniqueName: \"kubernetes.io/projected/2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4-kube-api-access-568dv\") pod \"console-f9d7485db-ghgwj\" (UID: \"2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4\") " pod="openshift-console/console-f9d7485db-ghgwj" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.283652 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/08c157ad-8d10-4284-a4e4-eb0b55a56542-audit-dir\") pod \"apiserver-76f77b778f-shvhw\" (UID: \"08c157ad-8d10-4284-a4e4-eb0b55a56542\") " pod="openshift-apiserver/apiserver-76f77b778f-shvhw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.283673 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/b5cfa364-a70a-4a62-a3ec-05285661a031-machine-approver-tls\") pod \"machine-approver-56656f9798-gb44m\" (UID: \"b5cfa364-a70a-4a62-a3ec-05285661a031\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-gb44m" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.283709 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-q8hkd\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.283732 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5351ce01-8233-4a92-b15d-befd4e57b0d2-config\") pod \"machine-api-operator-5694c8668f-twb6l\" (UID: \"5351ce01-8233-4a92-b15d-befd4e57b0d2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-twb6l" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.283753 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/da8fd0f6-4f81-4bd2-8c8a-3d62ac19e209-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-nk6zl\" (UID: \"da8fd0f6-4f81-4bd2-8c8a-3d62ac19e209\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nk6zl" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.283773 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/162f7ecc-6c53-4f08-955e-3ad0e5a7186e-serving-cert\") pod \"route-controller-manager-6576b87f9c-cv8sw\" (UID: \"162f7ecc-6c53-4f08-955e-3ad0e5a7186e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-cv8sw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.283810 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/08c157ad-8d10-4284-a4e4-eb0b55a56542-node-pullsecrets\") pod \"apiserver-76f77b778f-shvhw\" (UID: \"08c157ad-8d10-4284-a4e4-eb0b55a56542\") " pod="openshift-apiserver/apiserver-76f77b778f-shvhw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.283829 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/08c157ad-8d10-4284-a4e4-eb0b55a56542-audit\") pod \"apiserver-76f77b778f-shvhw\" (UID: \"08c157ad-8d10-4284-a4e4-eb0b55a56542\") " pod="openshift-apiserver/apiserver-76f77b778f-shvhw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.283849 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/08c157ad-8d10-4284-a4e4-eb0b55a56542-etcd-serving-ca\") pod \"apiserver-76f77b778f-shvhw\" (UID: \"08c157ad-8d10-4284-a4e4-eb0b55a56542\") " pod="openshift-apiserver/apiserver-76f77b778f-shvhw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.283867 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/3ad7cc35-130e-4404-8a4f-173884ab4e41-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-w82wc\" (UID: \"3ad7cc35-130e-4404-8a4f-173884ab4e41\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w82wc" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.283882 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/c7f5c01f-4820-4168-a19b-761a9e56650b-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-g8wkd\" (UID: \"c7f5c01f-4820-4168-a19b-761a9e56650b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-g8wkd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.283900 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4-console-oauth-config\") pod \"console-f9d7485db-ghgwj\" (UID: \"2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4\") " pod="openshift-console/console-f9d7485db-ghgwj" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.283942 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c7f5c01f-4820-4168-a19b-761a9e56650b-config\") pod \"openshift-apiserver-operator-796bbdcf4f-g8wkd\" (UID: \"c7f5c01f-4820-4168-a19b-761a9e56650b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-g8wkd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.283985 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dh2mj\" (UniqueName: \"kubernetes.io/projected/b5c763b0-7d68-4d32-871e-7e58ac32f03f-kube-api-access-dh2mj\") pod \"authentication-operator-69f744f599-tn4zj\" (UID: \"b5c763b0-7d68-4d32-871e-7e58ac32f03f\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-tn4zj" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.284004 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f668f\" (UniqueName: \"kubernetes.io/projected/01b73250-0ba8-4c2d-9ada-dacc89a70a7d-kube-api-access-f668f\") pod \"downloads-7954f5f757-l2kg2\" (UID: \"01b73250-0ba8-4c2d-9ada-dacc89a70a7d\") " pod="openshift-console/downloads-7954f5f757-l2kg2" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.284021 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qngnh\" (UniqueName: \"kubernetes.io/projected/3ad7cc35-130e-4404-8a4f-173884ab4e41-kube-api-access-qngnh\") pod \"apiserver-7bbb656c7d-w82wc\" (UID: \"3ad7cc35-130e-4404-8a4f-173884ab4e41\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w82wc" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.284095 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/da8fd0f6-4f81-4bd2-8c8a-3d62ac19e209-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-nk6zl\" (UID: \"da8fd0f6-4f81-4bd2-8c8a-3d62ac19e209\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nk6zl" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.284165 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.284170 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d73449ca-6268-455c-a1b5-79e0c6c79779-serving-cert\") pod \"openshift-config-operator-7777fb866f-bd82s\" (UID: \"d73449ca-6268-455c-a1b5-79e0c6c79779\") " 
pod="openshift-config-operator/openshift-config-operator-7777fb866f-bd82s" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.284311 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/5351ce01-8233-4a92-b15d-befd4e57b0d2-images\") pod \"machine-api-operator-5694c8668f-twb6l\" (UID: \"5351ce01-8233-4a92-b15d-befd4e57b0d2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-twb6l" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.284328 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5c763b0-7d68-4d32-871e-7e58ac32f03f-config\") pod \"authentication-operator-69f744f599-tn4zj\" (UID: \"b5c763b0-7d68-4d32-871e-7e58ac32f03f\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-tn4zj" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.284345 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/08c157ad-8d10-4284-a4e4-eb0b55a56542-encryption-config\") pod \"apiserver-76f77b778f-shvhw\" (UID: \"08c157ad-8d10-4284-a4e4-eb0b55a56542\") " pod="openshift-apiserver/apiserver-76f77b778f-shvhw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.284363 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dwh87\" (UniqueName: \"kubernetes.io/projected/6c5b8b29-e065-4587-88d6-cd93b56a1657-kube-api-access-dwh87\") pod \"cluster-samples-operator-665b6dd947-p57pg\" (UID: \"6c5b8b29-e065-4587-88d6-cd93b56a1657\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-p57pg" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.284377 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/162f7ecc-6c53-4f08-955e-3ad0e5a7186e-client-ca\") pod \"route-controller-manager-6576b87f9c-cv8sw\" (UID: \"162f7ecc-6c53-4f08-955e-3ad0e5a7186e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-cv8sw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.284392 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/08c157ad-8d10-4284-a4e4-eb0b55a56542-trusted-ca-bundle\") pod \"apiserver-76f77b778f-shvhw\" (UID: \"08c157ad-8d10-4284-a4e4-eb0b55a56542\") " pod="openshift-apiserver/apiserver-76f77b778f-shvhw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.284411 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/00b22ed8-e9b5-4173-8574-b06254cd0965-audit-policies\") pod \"oauth-openshift-558db77b4-q8hkd\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.284426 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-q8hkd\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.284441 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/afd3a02c-c392-4d05-8152-009157a52bf2-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-t7rc7\" (UID: \"afd3a02c-c392-4d05-8152-009157a52bf2\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t7rc7" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.284458 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b5c763b0-7d68-4d32-871e-7e58ac32f03f-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-tn4zj\" (UID: \"b5c763b0-7d68-4d32-871e-7e58ac32f03f\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-tn4zj" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.284474 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4-oauth-serving-cert\") pod \"console-f9d7485db-ghgwj\" (UID: \"2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4\") " pod="openshift-console/console-f9d7485db-ghgwj" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.284489 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cdvq9\" (UniqueName: \"kubernetes.io/projected/5351ce01-8233-4a92-b15d-befd4e57b0d2-kube-api-access-cdvq9\") pod \"machine-api-operator-5694c8668f-twb6l\" (UID: \"5351ce01-8233-4a92-b15d-befd4e57b0d2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-twb6l" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.284505 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/afd3a02c-c392-4d05-8152-009157a52bf2-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-t7rc7\" (UID: \"afd3a02c-c392-4d05-8152-009157a52bf2\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t7rc7" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.284535 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-q8hkd\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.284550 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b5c763b0-7d68-4d32-871e-7e58ac32f03f-serving-cert\") pod \"authentication-operator-69f744f599-tn4zj\" (UID: \"b5c763b0-7d68-4d32-871e-7e58ac32f03f\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-tn4zj" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.284567 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-system-serving-cert\") pod 
\"oauth-openshift-558db77b4-q8hkd\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.287649 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.288125 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-82vd5"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.288794 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-w55h7"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.289576 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-w55h7" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.290065 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-rdchw"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.290314 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-82vd5" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.290474 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-rdchw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.293238 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.293445 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-skhr4"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.294564 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-skhr4" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.296143 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d7jmg"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.297253 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d7jmg" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.297400 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-cnv9g"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.298316 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-cnv9g" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.299453 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-wvj7p"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.301846 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-g58mb"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.302893 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-g58mb" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.303456 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-wvj7p" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.304147 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-twb6l"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.305403 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t7rc7"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.307226 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-w82wc"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.308152 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401065-bbzl9"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.310145 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-bbzl9" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.311071 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-g8wkd"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.313303 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nk6zl"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.313720 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-l2kg2"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.314666 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-ghgwj"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.315615 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-xg47p"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.316541 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-g7vhl"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.317034 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-g7vhl" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.318032 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-tn4zj"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.318494 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wcj44"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.319427 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-cv8sw"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.320256 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.335212 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-shvhw"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.336657 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-p57pg"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.337786 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-qz6sr"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.339259 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-qthpd"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.340543 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-vp9pc"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.341946 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-wvj7p"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.343817 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-4w9xd"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.344508 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.350830 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-xrd6w"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.352151 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-f8mb8"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.353919 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-q8hkd"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.354268 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.354659 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-w55h7"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.357918 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-mvk5j"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.364562 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-8ft75"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.366614 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-zjqhj"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.369040 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-6dlmm"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.373604 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-s289l"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.374441 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.375087 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-v4q28"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.376708 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-blk8z"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.377438 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-blk8z" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.378118 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-sx97n"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.379360 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401065-bbzl9"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.379458 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-sx97n" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.380469 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-82vd5"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.381771 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d7jmg"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.384354 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-sx97n"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.385052 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-rdchw"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.385559 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/08c157ad-8d10-4284-a4e4-eb0b55a56542-etcd-client\") pod \"apiserver-76f77b778f-shvhw\" (UID: \"08c157ad-8d10-4284-a4e4-eb0b55a56542\") " pod="openshift-apiserver/apiserver-76f77b778f-shvhw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.385616 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3ad7cc35-130e-4404-8a4f-173884ab4e41-audit-dir\") pod \"apiserver-7bbb656c7d-w82wc\" (UID: \"3ad7cc35-130e-4404-8a4f-173884ab4e41\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w82wc" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.385640 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/446806b9-7941-43d7-885c-61c1d577811d-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-zjqhj\" (UID: \"446806b9-7941-43d7-885c-61c1d577811d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zjqhj" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.385659 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-568dv\" (UniqueName: \"kubernetes.io/projected/2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4-kube-api-access-568dv\") pod \"console-f9d7485db-ghgwj\" (UID: \"2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4\") " pod="openshift-console/console-f9d7485db-ghgwj" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.385718 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/08c157ad-8d10-4284-a4e4-eb0b55a56542-audit-dir\") pod \"apiserver-76f77b778f-shvhw\" (UID: \"08c157ad-8d10-4284-a4e4-eb0b55a56542\") " pod="openshift-apiserver/apiserver-76f77b778f-shvhw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.385740 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/b5cfa364-a70a-4a62-a3ec-05285661a031-machine-approver-tls\") pod \"machine-approver-56656f9798-gb44m\" (UID: \"b5cfa364-a70a-4a62-a3ec-05285661a031\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-gb44m" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.385761 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5351ce01-8233-4a92-b15d-befd4e57b0d2-config\") pod \"machine-api-operator-5694c8668f-twb6l\" (UID: 
\"5351ce01-8233-4a92-b15d-befd4e57b0d2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-twb6l" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.385798 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/08c157ad-8d10-4284-a4e4-eb0b55a56542-node-pullsecrets\") pod \"apiserver-76f77b778f-shvhw\" (UID: \"08c157ad-8d10-4284-a4e4-eb0b55a56542\") " pod="openshift-apiserver/apiserver-76f77b778f-shvhw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.385818 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4-console-oauth-config\") pod \"console-f9d7485db-ghgwj\" (UID: \"2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4\") " pod="openshift-console/console-f9d7485db-ghgwj" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.385841 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/4ed994f7-d70c-4220-a142-f23cb0550377-etcd-client\") pod \"etcd-operator-b45778765-qthpd\" (UID: \"4ed994f7-d70c-4220-a142-f23cb0550377\") " pod="openshift-etcd-operator/etcd-operator-b45778765-qthpd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.385878 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dh2mj\" (UniqueName: \"kubernetes.io/projected/b5c763b0-7d68-4d32-871e-7e58ac32f03f-kube-api-access-dh2mj\") pod \"authentication-operator-69f744f599-tn4zj\" (UID: \"b5c763b0-7d68-4d32-871e-7e58ac32f03f\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-tn4zj" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.385905 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f668f\" (UniqueName: \"kubernetes.io/projected/01b73250-0ba8-4c2d-9ada-dacc89a70a7d-kube-api-access-f668f\") pod \"downloads-7954f5f757-l2kg2\" (UID: \"01b73250-0ba8-4c2d-9ada-dacc89a70a7d\") " pod="openshift-console/downloads-7954f5f757-l2kg2" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.385925 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d73449ca-6268-455c-a1b5-79e0c6c79779-serving-cert\") pod \"openshift-config-operator-7777fb866f-bd82s\" (UID: \"d73449ca-6268-455c-a1b5-79e0c6c79779\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-bd82s" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.386002 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/5351ce01-8233-4a92-b15d-befd4e57b0d2-images\") pod \"machine-api-operator-5694c8668f-twb6l\" (UID: \"5351ce01-8233-4a92-b15d-befd4e57b0d2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-twb6l" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.386036 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0d5eb36d-4ded-4f1f-916d-140c5f814e27-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-v4q28\" (UID: \"0d5eb36d-4ded-4f1f-916d-140c5f814e27\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-v4q28" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.386080 4769 
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.386080 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/da8fd0f6-4f81-4bd2-8c8a-3d62ac19e209-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-nk6zl\" (UID: \"da8fd0f6-4f81-4bd2-8c8a-3d62ac19e209\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nk6zl"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.386105 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4257s\" (UniqueName: \"kubernetes.io/projected/326e56a0-cea9-40a2-ac71-cac257ebf902-kube-api-access-4257s\") pod \"migrator-59844c95c7-4w9xd\" (UID: \"326e56a0-cea9-40a2-ac71-cac257ebf902\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-4w9xd"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.386168 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5c763b0-7d68-4d32-871e-7e58ac32f03f-config\") pod \"authentication-operator-69f744f599-tn4zj\" (UID: \"b5c763b0-7d68-4d32-871e-7e58ac32f03f\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-tn4zj"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.386192 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e483c02c-6538-4d83-89d0-8c474de6c1cb-config\") pod \"kube-controller-manager-operator-78b949d7b-mvk5j\" (UID: \"e483c02c-6538-4d83-89d0-8c474de6c1cb\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-mvk5j"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.386234 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dwh87\" (UniqueName: \"kubernetes.io/projected/6c5b8b29-e065-4587-88d6-cd93b56a1657-kube-api-access-dwh87\") pod \"cluster-samples-operator-665b6dd947-p57pg\" (UID: \"6c5b8b29-e065-4587-88d6-cd93b56a1657\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-p57pg"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.386255 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/162f7ecc-6c53-4f08-955e-3ad0e5a7186e-client-ca\") pod \"route-controller-manager-6576b87f9c-cv8sw\" (UID: \"162f7ecc-6c53-4f08-955e-3ad0e5a7186e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-cv8sw"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.386280 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/c3329bde-d97b-47a5-96dc-a033e3c4bc8c-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-qz6sr\" (UID: \"c3329bde-d97b-47a5-96dc-a033e3c4bc8c\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-qz6sr"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.386322 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s9sl2\" (UniqueName: \"kubernetes.io/projected/ea217be5-30fd-43fe-8901-33ea461d4f48-kube-api-access-s9sl2\") pod \"catalog-operator-68c6474976-wcj44\" (UID: \"ea217be5-30fd-43fe-8901-33ea461d4f48\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wcj44"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.386348 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/4ed994f7-d70c-4220-a142-f23cb0550377-etcd-service-ca\") pod \"etcd-operator-b45778765-qthpd\" (UID: \"4ed994f7-d70c-4220-a142-f23cb0550377\") " pod="openshift-etcd-operator/etcd-operator-b45778765-qthpd"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.386387 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/afd3a02c-c392-4d05-8152-009157a52bf2-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-t7rc7\" (UID: \"afd3a02c-c392-4d05-8152-009157a52bf2\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t7rc7"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.386411 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-q8hkd\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.386441 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-skhr4"]
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.387105 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/014bf10b-9df2-4cdf-b1e7-f852a56044cc-srv-cert\") pod \"olm-operator-6b444d44fb-cnv9g\" (UID: \"014bf10b-9df2-4cdf-b1e7-f852a56044cc\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-cnv9g"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.387249 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b5c763b0-7d68-4d32-871e-7e58ac32f03f-serving-cert\") pod \"authentication-operator-69f744f599-tn4zj\" (UID: \"b5c763b0-7d68-4d32-871e-7e58ac32f03f\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-tn4zj"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.387284 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-q8hkd\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.387294 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-cnv9g"]
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.387337 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/014bf10b-9df2-4cdf-b1e7-f852a56044cc-profile-collector-cert\") pod \"olm-operator-6b444d44fb-cnv9g\" (UID: \"014bf10b-9df2-4cdf-b1e7-f852a56044cc\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-cnv9g"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.387375 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p9xps\" (UniqueName: \"kubernetes.io/projected/8b74c1fc-0aa2-4c74-80a1-3743defdbe7a-kube-api-access-p9xps\") pod \"ingress-operator-5b745b69d9-6dlmm\" (UID: \"8b74c1fc-0aa2-4c74-80a1-3743defdbe7a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-6dlmm"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.387428 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-q8hkd\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.387457 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-q8hkd\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.387504 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3ad7cc35-130e-4404-8a4f-173884ab4e41-serving-cert\") pod \"apiserver-7bbb656c7d-w82wc\" (UID: \"3ad7cc35-130e-4404-8a4f-173884ab4e41\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w82wc"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.387539 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/4ed994f7-d70c-4220-a142-f23cb0550377-etcd-ca\") pod \"etcd-operator-b45778765-qthpd\" (UID: \"4ed994f7-d70c-4220-a142-f23cb0550377\") " pod="openshift-etcd-operator/etcd-operator-b45778765-qthpd"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.387588 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4-service-ca\") pod \"console-f9d7485db-ghgwj\" (UID: \"2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4\") " pod="openshift-console/console-f9d7485db-ghgwj"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.387620 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e483c02c-6538-4d83-89d0-8c474de6c1cb-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-mvk5j\" (UID: \"e483c02c-6538-4d83-89d0-8c474de6c1cb\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-mvk5j"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.387680 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0a7bb2e3-eb5e-43a5-88f5-df93dff3afce-webhook-cert\") pod \"packageserver-d55dfcdfc-d7jmg\" (UID: \"0a7bb2e3-eb5e-43a5-88f5-df93dff3afce\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d7jmg"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.387711 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2bsw6\" (UniqueName: \"kubernetes.io/projected/b5cfa364-a70a-4a62-a3ec-05285661a031-kube-api-access-2bsw6\") pod \"machine-approver-56656f9798-gb44m\" (UID: \"b5cfa364-a70a-4a62-a3ec-05285661a031\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-gb44m"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.387765 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/08c157ad-8d10-4284-a4e4-eb0b55a56542-serving-cert\") pod \"apiserver-76f77b778f-shvhw\" (UID: \"08c157ad-8d10-4284-a4e4-eb0b55a56542\") " pod="openshift-apiserver/apiserver-76f77b778f-shvhw"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.387795 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/73d814ac-d3f7-4ed1-a37c-cfb7f988e88c-trusted-ca\") pod \"console-operator-58897d9998-xg47p\" (UID: \"73d814ac-d3f7-4ed1-a37c-cfb7f988e88c\") " pod="openshift-console-operator/console-operator-58897d9998-xg47p"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.387840 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4ed994f7-d70c-4220-a142-f23cb0550377-config\") pod \"etcd-operator-b45778765-qthpd\" (UID: \"4ed994f7-d70c-4220-a142-f23cb0550377\") " pod="openshift-etcd-operator/etcd-operator-b45778765-qthpd"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.387868 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/bfcd91d2-c8b0-4648-bcc5-207b11a54ece-default-certificate\") pod \"router-default-5444994796-zs9d4\" (UID: \"bfcd91d2-c8b0-4648-bcc5-207b11a54ece\") " pod="openshift-ingress/router-default-5444994796-zs9d4"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.387907 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4ed994f7-d70c-4220-a142-f23cb0550377-serving-cert\") pod \"etcd-operator-b45778765-qthpd\" (UID: \"4ed994f7-d70c-4220-a142-f23cb0550377\") " pod="openshift-etcd-operator/etcd-operator-b45778765-qthpd"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.388007 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/446806b9-7941-43d7-885c-61c1d577811d-serving-cert\") pod \"controller-manager-879f6c89f-zjqhj\" (UID: \"446806b9-7941-43d7-885c-61c1d577811d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zjqhj"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.388043 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n2k7c\" (UniqueName: \"kubernetes.io/projected/73d814ac-d3f7-4ed1-a37c-cfb7f988e88c-kube-api-access-n2k7c\") pod \"console-operator-58897d9998-xg47p\" (UID: \"73d814ac-d3f7-4ed1-a37c-cfb7f988e88c\") " pod="openshift-console-operator/console-operator-58897d9998-xg47p"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.388100 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-q8hkd\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.388142 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4-trusted-ca-bundle\") pod \"console-f9d7485db-ghgwj\" (UID: \"2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4\") " pod="openshift-console/console-f9d7485db-ghgwj"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.388192 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-q8hkd\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.388218 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-q8hkd\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.388264 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/afd3a02c-c392-4d05-8152-009157a52bf2-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-t7rc7\" (UID: \"afd3a02c-c392-4d05-8152-009157a52bf2\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t7rc7"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.388292 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6pmht\" (UniqueName: \"kubernetes.io/projected/c7f5c01f-4820-4168-a19b-761a9e56650b-kube-api-access-6pmht\") pod \"openshift-apiserver-operator-796bbdcf4f-g8wkd\" (UID: \"c7f5c01f-4820-4168-a19b-761a9e56650b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-g8wkd"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.388349 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-g58mb"]
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.390646 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-blk8z"]
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.390696 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-hn7p6"]
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.390799 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5c763b0-7d68-4d32-871e-7e58ac32f03f-config\") pod \"authentication-operator-69f744f599-tn4zj\" (UID: \"b5c763b0-7d68-4d32-871e-7e58ac32f03f\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-tn4zj"
Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.390023 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3ad7cc35-130e-4404-8a4f-173884ab4e41-audit-dir\") pod \"apiserver-7bbb656c7d-w82wc\" (UID: \"3ad7cc35-130e-4404-8a4f-173884ab4e41\")
" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w82wc" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.390068 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/08c157ad-8d10-4284-a4e4-eb0b55a56542-audit-dir\") pod \"apiserver-76f77b778f-shvhw\" (UID: \"08c157ad-8d10-4284-a4e4-eb0b55a56542\") " pod="openshift-apiserver/apiserver-76f77b778f-shvhw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.390101 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/162f7ecc-6c53-4f08-955e-3ad0e5a7186e-client-ca\") pod \"route-controller-manager-6576b87f9c-cv8sw\" (UID: \"162f7ecc-6c53-4f08-955e-3ad0e5a7186e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-cv8sw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.388096 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/08c157ad-8d10-4284-a4e4-eb0b55a56542-node-pullsecrets\") pod \"apiserver-76f77b778f-shvhw\" (UID: \"08c157ad-8d10-4284-a4e4-eb0b55a56542\") " pod="openshift-apiserver/apiserver-76f77b778f-shvhw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.390594 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/446806b9-7941-43d7-885c-61c1d577811d-client-ca\") pod \"controller-manager-879f6c89f-zjqhj\" (UID: \"446806b9-7941-43d7-885c-61c1d577811d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zjqhj" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.391274 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kszgt\" (UniqueName: \"kubernetes.io/projected/0d5eb36d-4ded-4f1f-916d-140c5f814e27-kube-api-access-kszgt\") pod \"machine-config-controller-84d6567774-v4q28\" (UID: \"0d5eb36d-4ded-4f1f-916d-140c5f814e27\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-v4q28" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.391366 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/d1f7b13d-54c0-4876-bccc-723388dee681-images\") pod \"machine-config-operator-74547568cd-8ft75\" (UID: \"d1f7b13d-54c0-4876-bccc-723388dee681\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8ft75" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.391450 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/0a7bb2e3-eb5e-43a5-88f5-df93dff3afce-tmpfs\") pod \"packageserver-d55dfcdfc-d7jmg\" (UID: \"0a7bb2e3-eb5e-43a5-88f5-df93dff3afce\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d7jmg" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.391524 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j897w\" (UniqueName: \"kubernetes.io/projected/bfcd91d2-c8b0-4648-bcc5-207b11a54ece-kube-api-access-j897w\") pod \"router-default-5444994796-zs9d4\" (UID: \"bfcd91d2-c8b0-4648-bcc5-207b11a54ece\") " pod="openshift-ingress/router-default-5444994796-zs9d4" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.391632 4769 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/00b22ed8-e9b5-4173-8574-b06254cd0965-audit-dir\") pod \"oauth-openshift-558db77b4-q8hkd\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.391715 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d9929f0c-776d-4583-94ca-bd665b5d9783-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-rdchw\" (UID: \"d9929f0c-776d-4583-94ca-bd665b5d9783\") " pod="openshift-marketplace/marketplace-operator-79b997595-rdchw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.391791 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/8b74c1fc-0aa2-4c74-80a1-3743defdbe7a-metrics-tls\") pod \"ingress-operator-5b745b69d9-6dlmm\" (UID: \"8b74c1fc-0aa2-4c74-80a1-3743defdbe7a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-6dlmm" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.391844 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/446806b9-7941-43d7-885c-61c1d577811d-client-ca\") pod \"controller-manager-879f6c89f-zjqhj\" (UID: \"446806b9-7941-43d7-885c-61c1d577811d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zjqhj" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.389740 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5351ce01-8233-4a92-b15d-befd4e57b0d2-config\") pod \"machine-api-operator-5694c8668f-twb6l\" (UID: \"5351ce01-8233-4a92-b15d-befd4e57b0d2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-twb6l" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.391174 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/5351ce01-8233-4a92-b15d-befd4e57b0d2-images\") pod \"machine-api-operator-5694c8668f-twb6l\" (UID: \"5351ce01-8233-4a92-b15d-befd4e57b0d2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-twb6l" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.391992 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-q8hkd\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.392004 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/c6742b80-a34d-4998-832f-29c88178b4ba-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-skhr4\" (UID: \"c6742b80-a34d-4998-832f-29c88178b4ba\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-skhr4" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.389827 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/da8fd0f6-4f81-4bd2-8c8a-3d62ac19e209-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-nk6zl\" (UID: \"da8fd0f6-4f81-4bd2-8c8a-3d62ac19e209\") " 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nk6zl" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.392195 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/b36eb87e-d18f-43f7-bd81-ffd76a8975d1-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-w55h7\" (UID: \"b36eb87e-d18f-43f7-bd81-ffd76a8975d1\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-w55h7" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.392280 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qnknr\" (UniqueName: \"kubernetes.io/projected/c3329bde-d97b-47a5-96dc-a033e3c4bc8c-kube-api-access-qnknr\") pod \"control-plane-machine-set-operator-78cbb6b69f-qz6sr\" (UID: \"c3329bde-d97b-47a5-96dc-a033e3c4bc8c\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-qz6sr" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.392354 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-hn7p6" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.392384 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-q8hkd\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.392357 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4djqh\" (UniqueName: \"kubernetes.io/projected/00b22ed8-e9b5-4173-8574-b06254cd0965-kube-api-access-4djqh\") pod \"oauth-openshift-558db77b4-q8hkd\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.392473 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/73d814ac-d3f7-4ed1-a37c-cfb7f988e88c-config\") pod \"console-operator-58897d9998-xg47p\" (UID: \"73d814ac-d3f7-4ed1-a37c-cfb7f988e88c\") " pod="openshift-console-operator/console-operator-58897d9998-xg47p" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.392502 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8b74c1fc-0aa2-4c74-80a1-3743defdbe7a-trusted-ca\") pod \"ingress-operator-5b745b69d9-6dlmm\" (UID: \"8b74c1fc-0aa2-4c74-80a1-3743defdbe7a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-6dlmm" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.392524 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mg58z\" (UniqueName: \"kubernetes.io/projected/446806b9-7941-43d7-885c-61c1d577811d-kube-api-access-mg58z\") pod \"controller-manager-879f6c89f-zjqhj\" (UID: \"446806b9-7941-43d7-885c-61c1d577811d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zjqhj" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.392597 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" 
(UniqueName: \"kubernetes.io/secret/d9929f0c-776d-4583-94ca-bd665b5d9783-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-rdchw\" (UID: \"d9929f0c-776d-4583-94ca-bd665b5d9783\") " pod="openshift-marketplace/marketplace-operator-79b997595-rdchw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.392622 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-q8hkd\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.392645 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hs92h\" (UniqueName: \"kubernetes.io/projected/0a7bb2e3-eb5e-43a5-88f5-df93dff3afce-kube-api-access-hs92h\") pod \"packageserver-d55dfcdfc-d7jmg\" (UID: \"0a7bb2e3-eb5e-43a5-88f5-df93dff3afce\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d7jmg" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.392669 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/da8fd0f6-4f81-4bd2-8c8a-3d62ac19e209-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-nk6zl\" (UID: \"da8fd0f6-4f81-4bd2-8c8a-3d62ac19e209\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nk6zl" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.392691 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/162f7ecc-6c53-4f08-955e-3ad0e5a7186e-serving-cert\") pod \"route-controller-manager-6576b87f9c-cv8sw\" (UID: \"162f7ecc-6c53-4f08-955e-3ad0e5a7186e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-cv8sw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.389956 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/446806b9-7941-43d7-885c-61c1d577811d-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-zjqhj\" (UID: \"446806b9-7941-43d7-885c-61c1d577811d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zjqhj" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.392294 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-hn7p6"] Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.393774 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/73d814ac-d3f7-4ed1-a37c-cfb7f988e88c-trusted-ca\") pod \"console-operator-58897d9998-xg47p\" (UID: \"73d814ac-d3f7-4ed1-a37c-cfb7f988e88c\") " pod="openshift-console-operator/console-operator-58897d9998-xg47p" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.394010 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/00b22ed8-e9b5-4173-8574-b06254cd0965-audit-dir\") pod \"oauth-openshift-558db77b4-q8hkd\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.394168 4769 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-apiserver"/"kube-root-ca.crt" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.394192 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d1f7b13d-54c0-4876-bccc-723388dee681-proxy-tls\") pod \"machine-config-operator-74547568cd-8ft75\" (UID: \"d1f7b13d-54c0-4876-bccc-723388dee681\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8ft75" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.394584 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/08c157ad-8d10-4284-a4e4-eb0b55a56542-audit\") pod \"apiserver-76f77b778f-shvhw\" (UID: \"08c157ad-8d10-4284-a4e4-eb0b55a56542\") " pod="openshift-apiserver/apiserver-76f77b778f-shvhw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.394656 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/08c157ad-8d10-4284-a4e4-eb0b55a56542-etcd-serving-ca\") pod \"apiserver-76f77b778f-shvhw\" (UID: \"08c157ad-8d10-4284-a4e4-eb0b55a56542\") " pod="openshift-apiserver/apiserver-76f77b778f-shvhw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.394720 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/3ad7cc35-130e-4404-8a4f-173884ab4e41-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-w82wc\" (UID: \"3ad7cc35-130e-4404-8a4f-173884ab4e41\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w82wc" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.394755 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/73d814ac-d3f7-4ed1-a37c-cfb7f988e88c-config\") pod \"console-operator-58897d9998-xg47p\" (UID: \"73d814ac-d3f7-4ed1-a37c-cfb7f988e88c\") " pod="openshift-console-operator/console-operator-58897d9998-xg47p" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.394871 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c7f5c01f-4820-4168-a19b-761a9e56650b-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-g8wkd\" (UID: \"c7f5c01f-4820-4168-a19b-761a9e56650b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-g8wkd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.394980 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c7f5c01f-4820-4168-a19b-761a9e56650b-config\") pod \"openshift-apiserver-operator-796bbdcf4f-g8wkd\" (UID: \"c7f5c01f-4820-4168-a19b-761a9e56650b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-g8wkd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.395049 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0a7bb2e3-eb5e-43a5-88f5-df93dff3afce-apiservice-cert\") pod \"packageserver-d55dfcdfc-d7jmg\" (UID: \"0a7bb2e3-eb5e-43a5-88f5-df93dff3afce\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d7jmg" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.395168 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4-trusted-ca-bundle\") pod \"console-f9d7485db-ghgwj\" (UID: \"2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4\") " pod="openshift-console/console-f9d7485db-ghgwj" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.395552 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/3ad7cc35-130e-4404-8a4f-173884ab4e41-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-w82wc\" (UID: \"3ad7cc35-130e-4404-8a4f-173884ab4e41\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w82wc" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.395674 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/08c157ad-8d10-4284-a4e4-eb0b55a56542-audit\") pod \"apiserver-76f77b778f-shvhw\" (UID: \"08c157ad-8d10-4284-a4e4-eb0b55a56542\") " pod="openshift-apiserver/apiserver-76f77b778f-shvhw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.395742 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qngnh\" (UniqueName: \"kubernetes.io/projected/3ad7cc35-130e-4404-8a4f-173884ab4e41-kube-api-access-qngnh\") pod \"apiserver-7bbb656c7d-w82wc\" (UID: \"3ad7cc35-130e-4404-8a4f-173884ab4e41\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w82wc" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.395868 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tw6s7\" (UniqueName: \"kubernetes.io/projected/c6742b80-a34d-4998-832f-29c88178b4ba-kube-api-access-tw6s7\") pod \"multus-admission-controller-857f4d67dd-skhr4\" (UID: \"c6742b80-a34d-4998-832f-29c88178b4ba\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-skhr4" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.395943 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/bfcd91d2-c8b0-4648-bcc5-207b11a54ece-stats-auth\") pod \"router-default-5444994796-zs9d4\" (UID: \"bfcd91d2-c8b0-4648-bcc5-207b11a54ece\") " pod="openshift-ingress/router-default-5444994796-zs9d4" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.396034 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/08c157ad-8d10-4284-a4e4-eb0b55a56542-encryption-config\") pod \"apiserver-76f77b778f-shvhw\" (UID: \"08c157ad-8d10-4284-a4e4-eb0b55a56542\") " pod="openshift-apiserver/apiserver-76f77b778f-shvhw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.396082 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bfcd91d2-c8b0-4648-bcc5-207b11a54ece-service-ca-bundle\") pod \"router-default-5444994796-zs9d4\" (UID: \"bfcd91d2-c8b0-4648-bcc5-207b11a54ece\") " pod="openshift-ingress/router-default-5444994796-zs9d4" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.396110 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/00b22ed8-e9b5-4173-8574-b06254cd0965-audit-policies\") pod \"oauth-openshift-558db77b4-q8hkd\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 
09:46:41.396148 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-q8hkd\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.396207 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d73449ca-6268-455c-a1b5-79e0c6c79779-serving-cert\") pod \"openshift-config-operator-7777fb866f-bd82s\" (UID: \"d73449ca-6268-455c-a1b5-79e0c6c79779\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-bd82s" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.396188 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c7f5c01f-4820-4168-a19b-761a9e56650b-config\") pod \"openshift-apiserver-operator-796bbdcf4f-g8wkd\" (UID: \"c7f5c01f-4820-4168-a19b-761a9e56650b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-g8wkd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.396267 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/08c157ad-8d10-4284-a4e4-eb0b55a56542-etcd-serving-ca\") pod \"apiserver-76f77b778f-shvhw\" (UID: \"08c157ad-8d10-4284-a4e4-eb0b55a56542\") " pod="openshift-apiserver/apiserver-76f77b778f-shvhw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.396310 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8b74c1fc-0aa2-4c74-80a1-3743defdbe7a-trusted-ca\") pod \"ingress-operator-5b745b69d9-6dlmm\" (UID: \"8b74c1fc-0aa2-4c74-80a1-3743defdbe7a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-6dlmm" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.396326 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/08c157ad-8d10-4284-a4e4-eb0b55a56542-trusted-ca-bundle\") pod \"apiserver-76f77b778f-shvhw\" (UID: \"08c157ad-8d10-4284-a4e4-eb0b55a56542\") " pod="openshift-apiserver/apiserver-76f77b778f-shvhw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.396370 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/afd3a02c-c392-4d05-8152-009157a52bf2-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-t7rc7\" (UID: \"afd3a02c-c392-4d05-8152-009157a52bf2\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t7rc7" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.396516 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b5c763b0-7d68-4d32-871e-7e58ac32f03f-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-tn4zj\" (UID: \"b5c763b0-7d68-4d32-871e-7e58ac32f03f\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-tn4zj" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.396588 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: 
\"kubernetes.io/configmap/2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4-oauth-serving-cert\") pod \"console-f9d7485db-ghgwj\" (UID: \"2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4\") " pod="openshift-console/console-f9d7485db-ghgwj" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.396626 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cdvq9\" (UniqueName: \"kubernetes.io/projected/5351ce01-8233-4a92-b15d-befd4e57b0d2-kube-api-access-cdvq9\") pod \"machine-api-operator-5694c8668f-twb6l\" (UID: \"5351ce01-8233-4a92-b15d-befd4e57b0d2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-twb6l" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.396636 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/00b22ed8-e9b5-4173-8574-b06254cd0965-audit-policies\") pod \"oauth-openshift-558db77b4-q8hkd\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.396766 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k2v99\" (UniqueName: \"kubernetes.io/projected/b36eb87e-d18f-43f7-bd81-ffd76a8975d1-kube-api-access-k2v99\") pod \"package-server-manager-789f6589d5-w55h7\" (UID: \"b36eb87e-d18f-43f7-bd81-ffd76a8975d1\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-w55h7" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.396805 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/bfcd91d2-c8b0-4648-bcc5-207b11a54ece-metrics-certs\") pod \"router-default-5444994796-zs9d4\" (UID: \"bfcd91d2-c8b0-4648-bcc5-207b11a54ece\") " pod="openshift-ingress/router-default-5444994796-zs9d4" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.396838 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gtwq4\" (UniqueName: \"kubernetes.io/projected/d9929f0c-776d-4583-94ca-bd665b5d9783-kube-api-access-gtwq4\") pod \"marketplace-operator-79b997595-rdchw\" (UID: \"d9929f0c-776d-4583-94ca-bd665b5d9783\") " pod="openshift-marketplace/marketplace-operator-79b997595-rdchw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.397186 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-q8hkd\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.397230 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4-service-ca\") pod \"console-f9d7485db-ghgwj\" (UID: \"2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4\") " pod="openshift-console/console-f9d7485db-ghgwj" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.397359 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/08c157ad-8d10-4284-a4e4-eb0b55a56542-trusted-ca-bundle\") pod \"apiserver-76f77b778f-shvhw\" (UID: \"08c157ad-8d10-4284-a4e4-eb0b55a56542\") " 
pod="openshift-apiserver/apiserver-76f77b778f-shvhw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.397463 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vkrtm\" (UniqueName: \"kubernetes.io/projected/4ed994f7-d70c-4220-a142-f23cb0550377-kube-api-access-vkrtm\") pod \"etcd-operator-b45778765-qthpd\" (UID: \"4ed994f7-d70c-4220-a142-f23cb0550377\") " pod="openshift-etcd-operator/etcd-operator-b45778765-qthpd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.397520 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dqzfl\" (UniqueName: \"kubernetes.io/projected/da8fd0f6-4f81-4bd2-8c8a-3d62ac19e209-kube-api-access-dqzfl\") pod \"openshift-controller-manager-operator-756b6f6bc6-nk6zl\" (UID: \"da8fd0f6-4f81-4bd2-8c8a-3d62ac19e209\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nk6zl" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.397551 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-q8hkd\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.397602 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/3ad7cc35-130e-4404-8a4f-173884ab4e41-etcd-client\") pod \"apiserver-7bbb656c7d-w82wc\" (UID: \"3ad7cc35-130e-4404-8a4f-173884ab4e41\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w82wc" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.397622 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b5c763b0-7d68-4d32-871e-7e58ac32f03f-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-tn4zj\" (UID: \"b5c763b0-7d68-4d32-871e-7e58ac32f03f\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-tn4zj" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.397629 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wlprx\" (UniqueName: \"kubernetes.io/projected/08c157ad-8d10-4284-a4e4-eb0b55a56542-kube-api-access-wlprx\") pod \"apiserver-76f77b778f-shvhw\" (UID: \"08c157ad-8d10-4284-a4e4-eb0b55a56542\") " pod="openshift-apiserver/apiserver-76f77b778f-shvhw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.397690 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/08c157ad-8d10-4284-a4e4-eb0b55a56542-image-import-ca\") pod \"apiserver-76f77b778f-shvhw\" (UID: \"08c157ad-8d10-4284-a4e4-eb0b55a56542\") " pod="openshift-apiserver/apiserver-76f77b778f-shvhw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.397720 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k5ghr\" (UniqueName: \"kubernetes.io/projected/d1f7b13d-54c0-4876-bccc-723388dee681-kube-api-access-k5ghr\") pod \"machine-config-operator-74547568cd-8ft75\" (UID: \"d1f7b13d-54c0-4876-bccc-723388dee681\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8ft75" Nov 25 09:46:41 crc 
kubenswrapper[4769]: I1125 09:46:41.397747 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/afd3a02c-c392-4d05-8152-009157a52bf2-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-t7rc7\" (UID: \"afd3a02c-c392-4d05-8152-009157a52bf2\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t7rc7" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.397756 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4-console-config\") pod \"console-f9d7485db-ghgwj\" (UID: \"2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4\") " pod="openshift-console/console-f9d7485db-ghgwj" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.397821 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n4ddz\" (UniqueName: \"kubernetes.io/projected/162f7ecc-6c53-4f08-955e-3ad0e5a7186e-kube-api-access-n4ddz\") pod \"route-controller-manager-6576b87f9c-cv8sw\" (UID: \"162f7ecc-6c53-4f08-955e-3ad0e5a7186e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-cv8sw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.397852 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/ea217be5-30fd-43fe-8901-33ea461d4f48-profile-collector-cert\") pod \"catalog-operator-68c6474976-wcj44\" (UID: \"ea217be5-30fd-43fe-8901-33ea461d4f48\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wcj44" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.397880 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/d73449ca-6268-455c-a1b5-79e0c6c79779-available-featuregates\") pod \"openshift-config-operator-7777fb866f-bd82s\" (UID: \"d73449ca-6268-455c-a1b5-79e0c6c79779\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-bd82s" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.397906 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x8g2g\" (UniqueName: \"kubernetes.io/projected/014bf10b-9df2-4cdf-b1e7-f852a56044cc-kube-api-access-x8g2g\") pod \"olm-operator-6b444d44fb-cnv9g\" (UID: \"014bf10b-9df2-4cdf-b1e7-f852a56044cc\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-cnv9g" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.397915 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/afd3a02c-c392-4d05-8152-009157a52bf2-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-t7rc7\" (UID: \"afd3a02c-c392-4d05-8152-009157a52bf2\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t7rc7" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.397929 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d1f7b13d-54c0-4876-bccc-723388dee681-auth-proxy-config\") pod \"machine-config-operator-74547568cd-8ft75\" (UID: \"d1f7b13d-54c0-4876-bccc-723388dee681\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8ft75" Nov 25 09:46:41 crc 
kubenswrapper[4769]: I1125 09:46:41.398062 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qkplg\" (UniqueName: \"kubernetes.io/projected/afd3a02c-c392-4d05-8152-009157a52bf2-kube-api-access-qkplg\") pod \"cluster-image-registry-operator-dc59b4c8b-t7rc7\" (UID: \"afd3a02c-c392-4d05-8152-009157a52bf2\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t7rc7" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.398089 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0d5eb36d-4ded-4f1f-916d-140c5f814e27-proxy-tls\") pod \"machine-config-controller-84d6567774-v4q28\" (UID: \"0d5eb36d-4ded-4f1f-916d-140c5f814e27\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-v4q28" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.398112 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5cfa364-a70a-4a62-a3ec-05285661a031-config\") pod \"machine-approver-56656f9798-gb44m\" (UID: \"b5cfa364-a70a-4a62-a3ec-05285661a031\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-gb44m" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.398132 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-q8hkd\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.398152 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8b74c1fc-0aa2-4c74-80a1-3743defdbe7a-bound-sa-token\") pod \"ingress-operator-5b745b69d9-6dlmm\" (UID: \"8b74c1fc-0aa2-4c74-80a1-3743defdbe7a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-6dlmm" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.398213 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4-oauth-serving-cert\") pod \"console-f9d7485db-ghgwj\" (UID: \"2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4\") " pod="openshift-console/console-f9d7485db-ghgwj" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.398236 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3ad7cc35-130e-4404-8a4f-173884ab4e41-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-w82wc\" (UID: \"3ad7cc35-130e-4404-8a4f-173884ab4e41\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w82wc" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.398259 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/d73449ca-6268-455c-a1b5-79e0c6c79779-available-featuregates\") pod \"openshift-config-operator-7777fb866f-bd82s\" (UID: \"d73449ca-6268-455c-a1b5-79e0c6c79779\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-bd82s" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.398219 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/446806b9-7941-43d7-885c-61c1d577811d-serving-cert\") pod \"controller-manager-879f6c89f-zjqhj\" (UID: \"446806b9-7941-43d7-885c-61c1d577811d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zjqhj" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.398347 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/162f7ecc-6c53-4f08-955e-3ad0e5a7186e-config\") pod \"route-controller-manager-6576b87f9c-cv8sw\" (UID: \"162f7ecc-6c53-4f08-955e-3ad0e5a7186e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-cv8sw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.398723 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5cfa364-a70a-4a62-a3ec-05285661a031-config\") pod \"machine-approver-56656f9798-gb44m\" (UID: \"b5cfa364-a70a-4a62-a3ec-05285661a031\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-gb44m" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.398776 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/08c157ad-8d10-4284-a4e4-eb0b55a56542-etcd-client\") pod \"apiserver-76f77b778f-shvhw\" (UID: \"08c157ad-8d10-4284-a4e4-eb0b55a56542\") " pod="openshift-apiserver/apiserver-76f77b778f-shvhw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.398799 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/08c157ad-8d10-4284-a4e4-eb0b55a56542-config\") pod \"apiserver-76f77b778f-shvhw\" (UID: \"08c157ad-8d10-4284-a4e4-eb0b55a56542\") " pod="openshift-apiserver/apiserver-76f77b778f-shvhw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.398795 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3ad7cc35-130e-4404-8a4f-173884ab4e41-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-w82wc\" (UID: \"3ad7cc35-130e-4404-8a4f-173884ab4e41\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w82wc" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.398844 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e483c02c-6538-4d83-89d0-8c474de6c1cb-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-mvk5j\" (UID: \"e483c02c-6538-4d83-89d0-8c474de6c1cb\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-mvk5j" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.399094 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/b5cfa364-a70a-4a62-a3ec-05285661a031-auth-proxy-config\") pod \"machine-approver-56656f9798-gb44m\" (UID: \"b5cfa364-a70a-4a62-a3ec-05285661a031\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-gb44m" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.399123 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4-console-config\") pod \"console-f9d7485db-ghgwj\" (UID: \"2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4\") " pod="openshift-console/console-f9d7485db-ghgwj" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 
09:46:41.399199 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-95jrj\" (UniqueName: \"kubernetes.io/projected/99e5c770-c7b9-496d-93c7-bd8151e1bd49-kube-api-access-95jrj\") pod \"dns-operator-744455d44c-s289l\" (UID: \"99e5c770-c7b9-496d-93c7-bd8151e1bd49\") " pod="openshift-dns-operator/dns-operator-744455d44c-s289l" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.399260 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/3ad7cc35-130e-4404-8a4f-173884ab4e41-encryption-config\") pod \"apiserver-7bbb656c7d-w82wc\" (UID: \"3ad7cc35-130e-4404-8a4f-173884ab4e41\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w82wc" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.399293 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/99e5c770-c7b9-496d-93c7-bd8151e1bd49-metrics-tls\") pod \"dns-operator-744455d44c-s289l\" (UID: \"99e5c770-c7b9-496d-93c7-bd8151e1bd49\") " pod="openshift-dns-operator/dns-operator-744455d44c-s289l" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.399365 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/446806b9-7941-43d7-885c-61c1d577811d-config\") pod \"controller-manager-879f6c89f-zjqhj\" (UID: \"446806b9-7941-43d7-885c-61c1d577811d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zjqhj" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.399393 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/ea217be5-30fd-43fe-8901-33ea461d4f48-srv-cert\") pod \"catalog-operator-68c6474976-wcj44\" (UID: \"ea217be5-30fd-43fe-8901-33ea461d4f48\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wcj44" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.399473 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/08c157ad-8d10-4284-a4e4-eb0b55a56542-config\") pod \"apiserver-76f77b778f-shvhw\" (UID: \"08c157ad-8d10-4284-a4e4-eb0b55a56542\") " pod="openshift-apiserver/apiserver-76f77b778f-shvhw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.399514 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/08c157ad-8d10-4284-a4e4-eb0b55a56542-image-import-ca\") pod \"apiserver-76f77b778f-shvhw\" (UID: \"08c157ad-8d10-4284-a4e4-eb0b55a56542\") " pod="openshift-apiserver/apiserver-76f77b778f-shvhw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.399574 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/3ad7cc35-130e-4404-8a4f-173884ab4e41-audit-policies\") pod \"apiserver-7bbb656c7d-w82wc\" (UID: \"3ad7cc35-130e-4404-8a4f-173884ab4e41\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w82wc" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.399935 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/b5cfa364-a70a-4a62-a3ec-05285661a031-auth-proxy-config\") pod \"machine-approver-56656f9798-gb44m\" (UID: \"b5cfa364-a70a-4a62-a3ec-05285661a031\") " 
pod="openshift-cluster-machine-approver/machine-approver-56656f9798-gb44m" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.400297 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/3ad7cc35-130e-4404-8a4f-173884ab4e41-audit-policies\") pod \"apiserver-7bbb656c7d-w82wc\" (UID: \"3ad7cc35-130e-4404-8a4f-173884ab4e41\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w82wc" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.400693 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b5c763b0-7d68-4d32-871e-7e58ac32f03f-service-ca-bundle\") pod \"authentication-operator-69f744f599-tn4zj\" (UID: \"b5c763b0-7d68-4d32-871e-7e58ac32f03f\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-tn4zj" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.400839 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/162f7ecc-6c53-4f08-955e-3ad0e5a7186e-config\") pod \"route-controller-manager-6576b87f9c-cv8sw\" (UID: \"162f7ecc-6c53-4f08-955e-3ad0e5a7186e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-cv8sw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.400930 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/73d814ac-d3f7-4ed1-a37c-cfb7f988e88c-serving-cert\") pod \"console-operator-58897d9998-xg47p\" (UID: \"73d814ac-d3f7-4ed1-a37c-cfb7f988e88c\") " pod="openshift-console-operator/console-operator-58897d9998-xg47p" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.400976 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4-console-oauth-config\") pod \"console-f9d7485db-ghgwj\" (UID: \"2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4\") " pod="openshift-console/console-f9d7485db-ghgwj" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.401019 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/5351ce01-8233-4a92-b15d-befd4e57b0d2-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-twb6l\" (UID: \"5351ce01-8233-4a92-b15d-befd4e57b0d2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-twb6l" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.401096 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/6c5b8b29-e065-4587-88d6-cd93b56a1657-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-p57pg\" (UID: \"6c5b8b29-e065-4587-88d6-cd93b56a1657\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-p57pg" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.401130 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/3ad7cc35-130e-4404-8a4f-173884ab4e41-etcd-client\") pod \"apiserver-7bbb656c7d-w82wc\" (UID: \"3ad7cc35-130e-4404-8a4f-173884ab4e41\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w82wc" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.401139 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4-console-serving-cert\") pod \"console-f9d7485db-ghgwj\" (UID: \"2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4\") " pod="openshift-console/console-f9d7485db-ghgwj" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.401214 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cs482\" (UniqueName: \"kubernetes.io/projected/d73449ca-6268-455c-a1b5-79e0c6c79779-kube-api-access-cs482\") pod \"openshift-config-operator-7777fb866f-bd82s\" (UID: \"d73449ca-6268-455c-a1b5-79e0c6c79779\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-bd82s" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.401301 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/446806b9-7941-43d7-885c-61c1d577811d-config\") pod \"controller-manager-879f6c89f-zjqhj\" (UID: \"446806b9-7941-43d7-885c-61c1d577811d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zjqhj" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.401401 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b5c763b0-7d68-4d32-871e-7e58ac32f03f-service-ca-bundle\") pod \"authentication-operator-69f744f599-tn4zj\" (UID: \"b5c763b0-7d68-4d32-871e-7e58ac32f03f\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-tn4zj" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.401929 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-q8hkd\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.402253 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-q8hkd\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.402345 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b5c763b0-7d68-4d32-871e-7e58ac32f03f-serving-cert\") pod \"authentication-operator-69f744f599-tn4zj\" (UID: \"b5c763b0-7d68-4d32-871e-7e58ac32f03f\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-tn4zj" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.402754 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-q8hkd\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.402907 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-system-session\") pod 
\"oauth-openshift-558db77b4-q8hkd\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.403176 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/b5cfa364-a70a-4a62-a3ec-05285661a031-machine-approver-tls\") pod \"machine-approver-56656f9798-gb44m\" (UID: \"b5cfa364-a70a-4a62-a3ec-05285661a031\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-gb44m" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.403329 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-q8hkd\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.403429 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/8b74c1fc-0aa2-4c74-80a1-3743defdbe7a-metrics-tls\") pod \"ingress-operator-5b745b69d9-6dlmm\" (UID: \"8b74c1fc-0aa2-4c74-80a1-3743defdbe7a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-6dlmm" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.403558 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-q8hkd\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.404270 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/08c157ad-8d10-4284-a4e4-eb0b55a56542-serving-cert\") pod \"apiserver-76f77b778f-shvhw\" (UID: \"08c157ad-8d10-4284-a4e4-eb0b55a56542\") " pod="openshift-apiserver/apiserver-76f77b778f-shvhw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.404458 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/99e5c770-c7b9-496d-93c7-bd8151e1bd49-metrics-tls\") pod \"dns-operator-744455d44c-s289l\" (UID: \"99e5c770-c7b9-496d-93c7-bd8151e1bd49\") " pod="openshift-dns-operator/dns-operator-744455d44c-s289l" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.404664 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-q8hkd\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.404726 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/da8fd0f6-4f81-4bd2-8c8a-3d62ac19e209-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-nk6zl\" (UID: \"da8fd0f6-4f81-4bd2-8c8a-3d62ac19e209\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nk6zl" Nov 25 09:46:41 crc kubenswrapper[4769]: 
I1125 09:46:41.404760 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3ad7cc35-130e-4404-8a4f-173884ab4e41-serving-cert\") pod \"apiserver-7bbb656c7d-w82wc\" (UID: \"3ad7cc35-130e-4404-8a4f-173884ab4e41\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w82wc" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.404816 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c7f5c01f-4820-4168-a19b-761a9e56650b-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-g8wkd\" (UID: \"c7f5c01f-4820-4168-a19b-761a9e56650b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-g8wkd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.405035 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/3ad7cc35-130e-4404-8a4f-173884ab4e41-encryption-config\") pod \"apiserver-7bbb656c7d-w82wc\" (UID: \"3ad7cc35-130e-4404-8a4f-173884ab4e41\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w82wc" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.405291 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/5351ce01-8233-4a92-b15d-befd4e57b0d2-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-twb6l\" (UID: \"5351ce01-8233-4a92-b15d-befd4e57b0d2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-twb6l" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.405305 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/08c157ad-8d10-4284-a4e4-eb0b55a56542-encryption-config\") pod \"apiserver-76f77b778f-shvhw\" (UID: \"08c157ad-8d10-4284-a4e4-eb0b55a56542\") " pod="openshift-apiserver/apiserver-76f77b778f-shvhw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.405392 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4-console-serving-cert\") pod \"console-f9d7485db-ghgwj\" (UID: \"2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4\") " pod="openshift-console/console-f9d7485db-ghgwj" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.406352 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/73d814ac-d3f7-4ed1-a37c-cfb7f988e88c-serving-cert\") pod \"console-operator-58897d9998-xg47p\" (UID: \"73d814ac-d3f7-4ed1-a37c-cfb7f988e88c\") " pod="openshift-console-operator/console-operator-58897d9998-xg47p" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.406487 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/162f7ecc-6c53-4f08-955e-3ad0e5a7186e-serving-cert\") pod \"route-controller-manager-6576b87f9c-cv8sw\" (UID: \"162f7ecc-6c53-4f08-955e-3ad0e5a7186e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-cv8sw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.406746 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-q8hkd\" (UID: 
\"00b22ed8-e9b5-4173-8574-b06254cd0965\") " pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.414359 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.419259 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/6c5b8b29-e065-4587-88d6-cd93b56a1657-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-p57pg\" (UID: \"6c5b8b29-e065-4587-88d6-cd93b56a1657\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-p57pg" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.434716 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.454523 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.478064 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.493909 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.502116 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/4ed994f7-d70c-4220-a142-f23cb0550377-etcd-client\") pod \"etcd-operator-b45778765-qthpd\" (UID: \"4ed994f7-d70c-4220-a142-f23cb0550377\") " pod="openshift-etcd-operator/etcd-operator-b45778765-qthpd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.502164 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0d5eb36d-4ded-4f1f-916d-140c5f814e27-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-v4q28\" (UID: \"0d5eb36d-4ded-4f1f-916d-140c5f814e27\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-v4q28" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.502185 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e483c02c-6538-4d83-89d0-8c474de6c1cb-config\") pod \"kube-controller-manager-operator-78b949d7b-mvk5j\" (UID: \"e483c02c-6538-4d83-89d0-8c474de6c1cb\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-mvk5j" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.502203 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4257s\" (UniqueName: \"kubernetes.io/projected/326e56a0-cea9-40a2-ac71-cac257ebf902-kube-api-access-4257s\") pod \"migrator-59844c95c7-4w9xd\" (UID: \"326e56a0-cea9-40a2-ac71-cac257ebf902\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-4w9xd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.502235 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/c3329bde-d97b-47a5-96dc-a033e3c4bc8c-control-plane-machine-set-operator-tls\") pod 
\"control-plane-machine-set-operator-78cbb6b69f-qz6sr\" (UID: \"c3329bde-d97b-47a5-96dc-a033e3c4bc8c\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-qz6sr" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.502256 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s9sl2\" (UniqueName: \"kubernetes.io/projected/ea217be5-30fd-43fe-8901-33ea461d4f48-kube-api-access-s9sl2\") pod \"catalog-operator-68c6474976-wcj44\" (UID: \"ea217be5-30fd-43fe-8901-33ea461d4f48\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wcj44" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.502277 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/4ed994f7-d70c-4220-a142-f23cb0550377-etcd-service-ca\") pod \"etcd-operator-b45778765-qthpd\" (UID: \"4ed994f7-d70c-4220-a142-f23cb0550377\") " pod="openshift-etcd-operator/etcd-operator-b45778765-qthpd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.502300 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/014bf10b-9df2-4cdf-b1e7-f852a56044cc-srv-cert\") pod \"olm-operator-6b444d44fb-cnv9g\" (UID: \"014bf10b-9df2-4cdf-b1e7-f852a56044cc\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-cnv9g" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.502315 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/014bf10b-9df2-4cdf-b1e7-f852a56044cc-profile-collector-cert\") pod \"olm-operator-6b444d44fb-cnv9g\" (UID: \"014bf10b-9df2-4cdf-b1e7-f852a56044cc\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-cnv9g" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.502336 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e483c02c-6538-4d83-89d0-8c474de6c1cb-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-mvk5j\" (UID: \"e483c02c-6538-4d83-89d0-8c474de6c1cb\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-mvk5j" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.502351 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/4ed994f7-d70c-4220-a142-f23cb0550377-etcd-ca\") pod \"etcd-operator-b45778765-qthpd\" (UID: \"4ed994f7-d70c-4220-a142-f23cb0550377\") " pod="openshift-etcd-operator/etcd-operator-b45778765-qthpd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.502366 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0a7bb2e3-eb5e-43a5-88f5-df93dff3afce-webhook-cert\") pod \"packageserver-d55dfcdfc-d7jmg\" (UID: \"0a7bb2e3-eb5e-43a5-88f5-df93dff3afce\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d7jmg" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.502386 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4ed994f7-d70c-4220-a142-f23cb0550377-config\") pod \"etcd-operator-b45778765-qthpd\" (UID: \"4ed994f7-d70c-4220-a142-f23cb0550377\") " pod="openshift-etcd-operator/etcd-operator-b45778765-qthpd" Nov 25 09:46:41 crc 
kubenswrapper[4769]: I1125 09:46:41.502403 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/bfcd91d2-c8b0-4648-bcc5-207b11a54ece-default-certificate\") pod \"router-default-5444994796-zs9d4\" (UID: \"bfcd91d2-c8b0-4648-bcc5-207b11a54ece\") " pod="openshift-ingress/router-default-5444994796-zs9d4" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.502421 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4ed994f7-d70c-4220-a142-f23cb0550377-serving-cert\") pod \"etcd-operator-b45778765-qthpd\" (UID: \"4ed994f7-d70c-4220-a142-f23cb0550377\") " pod="openshift-etcd-operator/etcd-operator-b45778765-qthpd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.502454 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kszgt\" (UniqueName: \"kubernetes.io/projected/0d5eb36d-4ded-4f1f-916d-140c5f814e27-kube-api-access-kszgt\") pod \"machine-config-controller-84d6567774-v4q28\" (UID: \"0d5eb36d-4ded-4f1f-916d-140c5f814e27\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-v4q28" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.502470 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/d1f7b13d-54c0-4876-bccc-723388dee681-images\") pod \"machine-config-operator-74547568cd-8ft75\" (UID: \"d1f7b13d-54c0-4876-bccc-723388dee681\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8ft75" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.502491 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/0a7bb2e3-eb5e-43a5-88f5-df93dff3afce-tmpfs\") pod \"packageserver-d55dfcdfc-d7jmg\" (UID: \"0a7bb2e3-eb5e-43a5-88f5-df93dff3afce\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d7jmg" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.502509 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j897w\" (UniqueName: \"kubernetes.io/projected/bfcd91d2-c8b0-4648-bcc5-207b11a54ece-kube-api-access-j897w\") pod \"router-default-5444994796-zs9d4\" (UID: \"bfcd91d2-c8b0-4648-bcc5-207b11a54ece\") " pod="openshift-ingress/router-default-5444994796-zs9d4" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.502528 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d9929f0c-776d-4583-94ca-bd665b5d9783-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-rdchw\" (UID: \"d9929f0c-776d-4583-94ca-bd665b5d9783\") " pod="openshift-marketplace/marketplace-operator-79b997595-rdchw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.502546 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/c6742b80-a34d-4998-832f-29c88178b4ba-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-skhr4\" (UID: \"c6742b80-a34d-4998-832f-29c88178b4ba\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-skhr4" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.502563 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/b36eb87e-d18f-43f7-bd81-ffd76a8975d1-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-w55h7\" (UID: \"b36eb87e-d18f-43f7-bd81-ffd76a8975d1\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-w55h7" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.502580 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qnknr\" (UniqueName: \"kubernetes.io/projected/c3329bde-d97b-47a5-96dc-a033e3c4bc8c-kube-api-access-qnknr\") pod \"control-plane-machine-set-operator-78cbb6b69f-qz6sr\" (UID: \"c3329bde-d97b-47a5-96dc-a033e3c4bc8c\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-qz6sr" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.502609 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/d9929f0c-776d-4583-94ca-bd665b5d9783-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-rdchw\" (UID: \"d9929f0c-776d-4583-94ca-bd665b5d9783\") " pod="openshift-marketplace/marketplace-operator-79b997595-rdchw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.502626 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hs92h\" (UniqueName: \"kubernetes.io/projected/0a7bb2e3-eb5e-43a5-88f5-df93dff3afce-kube-api-access-hs92h\") pod \"packageserver-d55dfcdfc-d7jmg\" (UID: \"0a7bb2e3-eb5e-43a5-88f5-df93dff3afce\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d7jmg" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.502641 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d1f7b13d-54c0-4876-bccc-723388dee681-proxy-tls\") pod \"machine-config-operator-74547568cd-8ft75\" (UID: \"d1f7b13d-54c0-4876-bccc-723388dee681\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8ft75" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.502663 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0a7bb2e3-eb5e-43a5-88f5-df93dff3afce-apiservice-cert\") pod \"packageserver-d55dfcdfc-d7jmg\" (UID: \"0a7bb2e3-eb5e-43a5-88f5-df93dff3afce\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d7jmg" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.502680 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tw6s7\" (UniqueName: \"kubernetes.io/projected/c6742b80-a34d-4998-832f-29c88178b4ba-kube-api-access-tw6s7\") pod \"multus-admission-controller-857f4d67dd-skhr4\" (UID: \"c6742b80-a34d-4998-832f-29c88178b4ba\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-skhr4" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.502696 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bfcd91d2-c8b0-4648-bcc5-207b11a54ece-service-ca-bundle\") pod \"router-default-5444994796-zs9d4\" (UID: \"bfcd91d2-c8b0-4648-bcc5-207b11a54ece\") " pod="openshift-ingress/router-default-5444994796-zs9d4" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.502712 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/bfcd91d2-c8b0-4648-bcc5-207b11a54ece-stats-auth\") 
pod \"router-default-5444994796-zs9d4\" (UID: \"bfcd91d2-c8b0-4648-bcc5-207b11a54ece\") " pod="openshift-ingress/router-default-5444994796-zs9d4" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.502729 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k2v99\" (UniqueName: \"kubernetes.io/projected/b36eb87e-d18f-43f7-bd81-ffd76a8975d1-kube-api-access-k2v99\") pod \"package-server-manager-789f6589d5-w55h7\" (UID: \"b36eb87e-d18f-43f7-bd81-ffd76a8975d1\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-w55h7" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.502750 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/bfcd91d2-c8b0-4648-bcc5-207b11a54ece-metrics-certs\") pod \"router-default-5444994796-zs9d4\" (UID: \"bfcd91d2-c8b0-4648-bcc5-207b11a54ece\") " pod="openshift-ingress/router-default-5444994796-zs9d4" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.502767 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gtwq4\" (UniqueName: \"kubernetes.io/projected/d9929f0c-776d-4583-94ca-bd665b5d9783-kube-api-access-gtwq4\") pod \"marketplace-operator-79b997595-rdchw\" (UID: \"d9929f0c-776d-4583-94ca-bd665b5d9783\") " pod="openshift-marketplace/marketplace-operator-79b997595-rdchw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.502782 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vkrtm\" (UniqueName: \"kubernetes.io/projected/4ed994f7-d70c-4220-a142-f23cb0550377-kube-api-access-vkrtm\") pod \"etcd-operator-b45778765-qthpd\" (UID: \"4ed994f7-d70c-4220-a142-f23cb0550377\") " pod="openshift-etcd-operator/etcd-operator-b45778765-qthpd" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.502808 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k5ghr\" (UniqueName: \"kubernetes.io/projected/d1f7b13d-54c0-4876-bccc-723388dee681-kube-api-access-k5ghr\") pod \"machine-config-operator-74547568cd-8ft75\" (UID: \"d1f7b13d-54c0-4876-bccc-723388dee681\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8ft75" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.502828 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x8g2g\" (UniqueName: \"kubernetes.io/projected/014bf10b-9df2-4cdf-b1e7-f852a56044cc-kube-api-access-x8g2g\") pod \"olm-operator-6b444d44fb-cnv9g\" (UID: \"014bf10b-9df2-4cdf-b1e7-f852a56044cc\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-cnv9g" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.502846 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/ea217be5-30fd-43fe-8901-33ea461d4f48-profile-collector-cert\") pod \"catalog-operator-68c6474976-wcj44\" (UID: \"ea217be5-30fd-43fe-8901-33ea461d4f48\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wcj44" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.502863 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0d5eb36d-4ded-4f1f-916d-140c5f814e27-proxy-tls\") pod \"machine-config-controller-84d6567774-v4q28\" (UID: \"0d5eb36d-4ded-4f1f-916d-140c5f814e27\") " 
pod="openshift-machine-config-operator/machine-config-controller-84d6567774-v4q28" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.502877 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d1f7b13d-54c0-4876-bccc-723388dee681-auth-proxy-config\") pod \"machine-config-operator-74547568cd-8ft75\" (UID: \"d1f7b13d-54c0-4876-bccc-723388dee681\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8ft75" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.502907 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e483c02c-6538-4d83-89d0-8c474de6c1cb-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-mvk5j\" (UID: \"e483c02c-6538-4d83-89d0-8c474de6c1cb\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-mvk5j" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.502928 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/ea217be5-30fd-43fe-8901-33ea461d4f48-srv-cert\") pod \"catalog-operator-68c6474976-wcj44\" (UID: \"ea217be5-30fd-43fe-8901-33ea461d4f48\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wcj44" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.503265 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0d5eb36d-4ded-4f1f-916d-140c5f814e27-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-v4q28\" (UID: \"0d5eb36d-4ded-4f1f-916d-140c5f814e27\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-v4q28" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.504197 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/0a7bb2e3-eb5e-43a5-88f5-df93dff3afce-tmpfs\") pod \"packageserver-d55dfcdfc-d7jmg\" (UID: \"0a7bb2e3-eb5e-43a5-88f5-df93dff3afce\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d7jmg" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.504839 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/d1f7b13d-54c0-4876-bccc-723388dee681-images\") pod \"machine-config-operator-74547568cd-8ft75\" (UID: \"d1f7b13d-54c0-4876-bccc-723388dee681\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8ft75" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.504880 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d1f7b13d-54c0-4876-bccc-723388dee681-auth-proxy-config\") pod \"machine-config-operator-74547568cd-8ft75\" (UID: \"d1f7b13d-54c0-4876-bccc-723388dee681\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8ft75" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.507536 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d1f7b13d-54c0-4876-bccc-723388dee681-proxy-tls\") pod \"machine-config-operator-74547568cd-8ft75\" (UID: \"d1f7b13d-54c0-4876-bccc-723388dee681\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8ft75" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 
09:46:41.514223 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.534163 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.553767 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.574525 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.594262 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.615456 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.634286 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.654403 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.674156 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.674753 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e483c02c-6538-4d83-89d0-8c474de6c1cb-config\") pod \"kube-controller-manager-operator-78b949d7b-mvk5j\" (UID: \"e483c02c-6538-4d83-89d0-8c474de6c1cb\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-mvk5j" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.697871 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.708403 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e483c02c-6538-4d83-89d0-8c474de6c1cb-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-mvk5j\" (UID: \"e483c02c-6538-4d83-89d0-8c474de6c1cb\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-mvk5j" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.714301 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.735804 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.756357 4769 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-ingress"/"openshift-service-ca.crt" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.774179 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.794987 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.807558 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/bfcd91d2-c8b0-4648-bcc5-207b11a54ece-default-certificate\") pod \"router-default-5444994796-zs9d4\" (UID: \"bfcd91d2-c8b0-4648-bcc5-207b11a54ece\") " pod="openshift-ingress/router-default-5444994796-zs9d4" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.816858 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.825145 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bfcd91d2-c8b0-4648-bcc5-207b11a54ece-service-ca-bundle\") pod \"router-default-5444994796-zs9d4\" (UID: \"bfcd91d2-c8b0-4648-bcc5-207b11a54ece\") " pod="openshift-ingress/router-default-5444994796-zs9d4" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.836016 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.855096 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.868217 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/bfcd91d2-c8b0-4648-bcc5-207b11a54ece-metrics-certs\") pod \"router-default-5444994796-zs9d4\" (UID: \"bfcd91d2-c8b0-4648-bcc5-207b11a54ece\") " pod="openshift-ingress/router-default-5444994796-zs9d4" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.874268 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.877352 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/bfcd91d2-c8b0-4648-bcc5-207b11a54ece-stats-auth\") pod \"router-default-5444994796-zs9d4\" (UID: \"bfcd91d2-c8b0-4648-bcc5-207b11a54ece\") " pod="openshift-ingress/router-default-5444994796-zs9d4" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.914247 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.934579 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.954414 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.974574 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 
09:46:41.988841 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/ea217be5-30fd-43fe-8901-33ea461d4f48-srv-cert\") pod \"catalog-operator-68c6474976-wcj44\" (UID: \"ea217be5-30fd-43fe-8901-33ea461d4f48\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wcj44" Nov 25 09:46:41 crc kubenswrapper[4769]: I1125 09:46:41.994655 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.016049 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.029189 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/ea217be5-30fd-43fe-8901-33ea461d4f48-profile-collector-cert\") pod \"catalog-operator-68c6474976-wcj44\" (UID: \"ea217be5-30fd-43fe-8901-33ea461d4f48\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wcj44" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.029428 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/014bf10b-9df2-4cdf-b1e7-f852a56044cc-profile-collector-cert\") pod \"olm-operator-6b444d44fb-cnv9g\" (UID: \"014bf10b-9df2-4cdf-b1e7-f852a56044cc\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-cnv9g" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.035422 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.054284 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.074701 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.096493 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.113387 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.134204 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.154610 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.169316 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0d5eb36d-4ded-4f1f-916d-140c5f814e27-proxy-tls\") pod \"machine-config-controller-84d6567774-v4q28\" (UID: \"0d5eb36d-4ded-4f1f-916d-140c5f814e27\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-v4q28" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.175000 4769 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.193285 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.196656 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/c3329bde-d97b-47a5-96dc-a033e3c4bc8c-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-qz6sr\" (UID: \"c3329bde-d97b-47a5-96dc-a033e3c4bc8c\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-qz6sr" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.214178 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.234258 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.246003 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/4ed994f7-d70c-4220-a142-f23cb0550377-etcd-ca\") pod \"etcd-operator-b45778765-qthpd\" (UID: \"4ed994f7-d70c-4220-a142-f23cb0550377\") " pod="openshift-etcd-operator/etcd-operator-b45778765-qthpd" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.254045 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.265557 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4ed994f7-d70c-4220-a142-f23cb0550377-config\") pod \"etcd-operator-b45778765-qthpd\" (UID: \"4ed994f7-d70c-4220-a142-f23cb0550377\") " pod="openshift-etcd-operator/etcd-operator-b45778765-qthpd" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.274077 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.284626 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/4ed994f7-d70c-4220-a142-f23cb0550377-etcd-service-ca\") pod \"etcd-operator-b45778765-qthpd\" (UID: \"4ed994f7-d70c-4220-a142-f23cb0550377\") " pod="openshift-etcd-operator/etcd-operator-b45778765-qthpd" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.292320 4769 request.go:700] Waited for 1.010467447s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-etcd-operator/configmaps?fieldSelector=metadata.name%3Dkube-root-ca.crt&limit=500&resourceVersion=0 Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.294035 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.315261 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.329658 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/4ed994f7-d70c-4220-a142-f23cb0550377-serving-cert\") pod \"etcd-operator-b45778765-qthpd\" (UID: \"4ed994f7-d70c-4220-a142-f23cb0550377\") " pod="openshift-etcd-operator/etcd-operator-b45778765-qthpd" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.335704 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.353494 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.369372 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/4ed994f7-d70c-4220-a142-f23cb0550377-etcd-client\") pod \"etcd-operator-b45778765-qthpd\" (UID: \"4ed994f7-d70c-4220-a142-f23cb0550377\") " pod="openshift-etcd-operator/etcd-operator-b45778765-qthpd" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.374345 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.389914 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/b36eb87e-d18f-43f7-bd81-ffd76a8975d1-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-w55h7\" (UID: \"b36eb87e-d18f-43f7-bd81-ffd76a8975d1\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-w55h7" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.402864 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.415171 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.443020 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.454902 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.474587 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.494196 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.498927 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/d9929f0c-776d-4583-94ca-bd665b5d9783-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-rdchw\" (UID: \"d9929f0c-776d-4583-94ca-bd665b5d9783\") " pod="openshift-marketplace/marketplace-operator-79b997595-rdchw" Nov 25 09:46:42 crc kubenswrapper[4769]: E1125 09:46:42.503721 4769 configmap.go:193] Couldn't get configMap openshift-marketplace/marketplace-trusted-ca: failed to sync configmap cache: timed out waiting for the condition Nov 25 09:46:42 crc kubenswrapper[4769]: E1125 09:46:42.503786 4769 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d9929f0c-776d-4583-94ca-bd665b5d9783-marketplace-trusted-ca podName:d9929f0c-776d-4583-94ca-bd665b5d9783 nodeName:}" failed. No retries permitted until 2025-11-25 09:46:43.003767741 +0000 UTC m=+151.588740064 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "marketplace-trusted-ca" (UniqueName: "kubernetes.io/configmap/d9929f0c-776d-4583-94ca-bd665b5d9783-marketplace-trusted-ca") pod "marketplace-operator-79b997595-rdchw" (UID: "d9929f0c-776d-4583-94ca-bd665b5d9783") : failed to sync configmap cache: timed out waiting for the condition Nov 25 09:46:42 crc kubenswrapper[4769]: E1125 09:46:42.503781 4769 secret.go:188] Couldn't get secret openshift-multus/multus-admission-controller-secret: failed to sync secret cache: timed out waiting for the condition Nov 25 09:46:42 crc kubenswrapper[4769]: E1125 09:46:42.503877 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c6742b80-a34d-4998-832f-29c88178b4ba-webhook-certs podName:c6742b80-a34d-4998-832f-29c88178b4ba nodeName:}" failed. No retries permitted until 2025-11-25 09:46:43.003851884 +0000 UTC m=+151.588824227 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/c6742b80-a34d-4998-832f-29c88178b4ba-webhook-certs") pod "multus-admission-controller-857f4d67dd-skhr4" (UID: "c6742b80-a34d-4998-832f-29c88178b4ba") : failed to sync secret cache: timed out waiting for the condition Nov 25 09:46:42 crc kubenswrapper[4769]: E1125 09:46:42.503924 4769 secret.go:188] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: failed to sync secret cache: timed out waiting for the condition Nov 25 09:46:42 crc kubenswrapper[4769]: E1125 09:46:42.504015 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0a7bb2e3-eb5e-43a5-88f5-df93dff3afce-webhook-cert podName:0a7bb2e3-eb5e-43a5-88f5-df93dff3afce nodeName:}" failed. No retries permitted until 2025-11-25 09:46:43.003956098 +0000 UTC m=+151.588928441 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-cert" (UniqueName: "kubernetes.io/secret/0a7bb2e3-eb5e-43a5-88f5-df93dff3afce-webhook-cert") pod "packageserver-d55dfcdfc-d7jmg" (UID: "0a7bb2e3-eb5e-43a5-88f5-df93dff3afce") : failed to sync secret cache: timed out waiting for the condition Nov 25 09:46:42 crc kubenswrapper[4769]: E1125 09:46:42.504019 4769 secret.go:188] Couldn't get secret openshift-operator-lifecycle-manager/olm-operator-serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 25 09:46:42 crc kubenswrapper[4769]: E1125 09:46:42.504067 4769 secret.go:188] Couldn't get secret openshift-operator-lifecycle-manager/packageserver-service-cert: failed to sync secret cache: timed out waiting for the condition Nov 25 09:46:42 crc kubenswrapper[4769]: E1125 09:46:42.504100 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0a7bb2e3-eb5e-43a5-88f5-df93dff3afce-apiservice-cert podName:0a7bb2e3-eb5e-43a5-88f5-df93dff3afce nodeName:}" failed. No retries permitted until 2025-11-25 09:46:43.004090554 +0000 UTC m=+151.589062867 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "apiservice-cert" (UniqueName: "kubernetes.io/secret/0a7bb2e3-eb5e-43a5-88f5-df93dff3afce-apiservice-cert") pod "packageserver-d55dfcdfc-d7jmg" (UID: "0a7bb2e3-eb5e-43a5-88f5-df93dff3afce") : failed to sync secret cache: timed out waiting for the condition Nov 25 09:46:42 crc kubenswrapper[4769]: E1125 09:46:42.504161 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/014bf10b-9df2-4cdf-b1e7-f852a56044cc-srv-cert podName:014bf10b-9df2-4cdf-b1e7-f852a56044cc nodeName:}" failed. No retries permitted until 2025-11-25 09:46:43.004122665 +0000 UTC m=+151.589095018 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/014bf10b-9df2-4cdf-b1e7-f852a56044cc-srv-cert") pod "olm-operator-6b444d44fb-cnv9g" (UID: "014bf10b-9df2-4cdf-b1e7-f852a56044cc") : failed to sync secret cache: timed out waiting for the condition Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.524894 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.534194 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.555537 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.574453 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.594774 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.615291 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.636828 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.675423 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.693926 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.714616 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.733775 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.753409 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.774444 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.793793 4769 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.814141 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.835016 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.860648 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.874321 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.894797 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.913845 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.935877 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.955867 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.975569 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 25 09:46:42 crc kubenswrapper[4769]: I1125 09:46:42.995017 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.014622 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.029642 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/014bf10b-9df2-4cdf-b1e7-f852a56044cc-srv-cert\") pod \"olm-operator-6b444d44fb-cnv9g\" (UID: \"014bf10b-9df2-4cdf-b1e7-f852a56044cc\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-cnv9g" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.029783 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0a7bb2e3-eb5e-43a5-88f5-df93dff3afce-webhook-cert\") pod \"packageserver-d55dfcdfc-d7jmg\" (UID: \"0a7bb2e3-eb5e-43a5-88f5-df93dff3afce\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d7jmg" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.029929 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d9929f0c-776d-4583-94ca-bd665b5d9783-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-rdchw\" (UID: \"d9929f0c-776d-4583-94ca-bd665b5d9783\") " pod="openshift-marketplace/marketplace-operator-79b997595-rdchw" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.030028 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: 
\"kubernetes.io/secret/c6742b80-a34d-4998-832f-29c88178b4ba-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-skhr4\" (UID: \"c6742b80-a34d-4998-832f-29c88178b4ba\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-skhr4" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.030222 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0a7bb2e3-eb5e-43a5-88f5-df93dff3afce-apiservice-cert\") pod \"packageserver-d55dfcdfc-d7jmg\" (UID: \"0a7bb2e3-eb5e-43a5-88f5-df93dff3afce\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d7jmg" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.032155 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d9929f0c-776d-4583-94ca-bd665b5d9783-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-rdchw\" (UID: \"d9929f0c-776d-4583-94ca-bd665b5d9783\") " pod="openshift-marketplace/marketplace-operator-79b997595-rdchw" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.037760 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.038747 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0a7bb2e3-eb5e-43a5-88f5-df93dff3afce-webhook-cert\") pod \"packageserver-d55dfcdfc-d7jmg\" (UID: \"0a7bb2e3-eb5e-43a5-88f5-df93dff3afce\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d7jmg" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.039601 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0a7bb2e3-eb5e-43a5-88f5-df93dff3afce-apiservice-cert\") pod \"packageserver-d55dfcdfc-d7jmg\" (UID: \"0a7bb2e3-eb5e-43a5-88f5-df93dff3afce\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d7jmg" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.040634 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/014bf10b-9df2-4cdf-b1e7-f852a56044cc-srv-cert\") pod \"olm-operator-6b444d44fb-cnv9g\" (UID: \"014bf10b-9df2-4cdf-b1e7-f852a56044cc\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-cnv9g" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.042054 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/c6742b80-a34d-4998-832f-29c88178b4ba-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-skhr4\" (UID: \"c6742b80-a34d-4998-832f-29c88178b4ba\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-skhr4" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.055235 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.074894 4769 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.094941 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.129029 4769 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-dwh87\" (UniqueName: \"kubernetes.io/projected/6c5b8b29-e065-4587-88d6-cd93b56a1657-kube-api-access-dwh87\") pod \"cluster-samples-operator-665b6dd947-p57pg\" (UID: \"6c5b8b29-e065-4587-88d6-cd93b56a1657\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-p57pg" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.155010 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-568dv\" (UniqueName: \"kubernetes.io/projected/2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4-kube-api-access-568dv\") pod \"console-f9d7485db-ghgwj\" (UID: \"2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4\") " pod="openshift-console/console-f9d7485db-ghgwj" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.168095 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6pmht\" (UniqueName: \"kubernetes.io/projected/c7f5c01f-4820-4168-a19b-761a9e56650b-kube-api-access-6pmht\") pod \"openshift-apiserver-operator-796bbdcf4f-g8wkd\" (UID: \"c7f5c01f-4820-4168-a19b-761a9e56650b\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-g8wkd" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.188634 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f668f\" (UniqueName: \"kubernetes.io/projected/01b73250-0ba8-4c2d-9ada-dacc89a70a7d-kube-api-access-f668f\") pod \"downloads-7954f5f757-l2kg2\" (UID: \"01b73250-0ba8-4c2d-9ada-dacc89a70a7d\") " pod="openshift-console/downloads-7954f5f757-l2kg2" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.209209 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dh2mj\" (UniqueName: \"kubernetes.io/projected/b5c763b0-7d68-4d32-871e-7e58ac32f03f-kube-api-access-dh2mj\") pod \"authentication-operator-69f744f599-tn4zj\" (UID: \"b5c763b0-7d68-4d32-871e-7e58ac32f03f\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-tn4zj" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.232274 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/afd3a02c-c392-4d05-8152-009157a52bf2-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-t7rc7\" (UID: \"afd3a02c-c392-4d05-8152-009157a52bf2\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t7rc7" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.254760 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p9xps\" (UniqueName: \"kubernetes.io/projected/8b74c1fc-0aa2-4c74-80a1-3743defdbe7a-kube-api-access-p9xps\") pod \"ingress-operator-5b745b69d9-6dlmm\" (UID: \"8b74c1fc-0aa2-4c74-80a1-3743defdbe7a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-6dlmm" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.259020 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-tn4zj" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.276065 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2bsw6\" (UniqueName: \"kubernetes.io/projected/b5cfa364-a70a-4a62-a3ec-05285661a031-kube-api-access-2bsw6\") pod \"machine-approver-56656f9798-gb44m\" (UID: \"b5cfa364-a70a-4a62-a3ec-05285661a031\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-gb44m" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.295490 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n2k7c\" (UniqueName: \"kubernetes.io/projected/73d814ac-d3f7-4ed1-a37c-cfb7f988e88c-kube-api-access-n2k7c\") pod \"console-operator-58897d9998-xg47p\" (UID: \"73d814ac-d3f7-4ed1-a37c-cfb7f988e88c\") " pod="openshift-console-operator/console-operator-58897d9998-xg47p" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.312306 4769 request.go:700] Waited for 1.919249657s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-dns/configmaps?fieldSelector=metadata.name%3Ddns-default&limit=500&resourceVersion=0 Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.313832 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4djqh\" (UniqueName: \"kubernetes.io/projected/00b22ed8-e9b5-4173-8574-b06254cd0965-kube-api-access-4djqh\") pod \"oauth-openshift-558db77b4-q8hkd\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.313842 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.329678 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-ghgwj" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.334512 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.354921 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.376616 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-g8wkd" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.409359 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mg58z\" (UniqueName: \"kubernetes.io/projected/446806b9-7941-43d7-885c-61c1d577811d-kube-api-access-mg58z\") pod \"controller-manager-879f6c89f-zjqhj\" (UID: \"446806b9-7941-43d7-885c-61c1d577811d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-zjqhj" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.411058 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qngnh\" (UniqueName: \"kubernetes.io/projected/3ad7cc35-130e-4404-8a4f-173884ab4e41-kube-api-access-qngnh\") pod \"apiserver-7bbb656c7d-w82wc\" (UID: \"3ad7cc35-130e-4404-8a4f-173884ab4e41\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w82wc" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.429557 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-p57pg" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.433050 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cdvq9\" (UniqueName: \"kubernetes.io/projected/5351ce01-8233-4a92-b15d-befd4e57b0d2-kube-api-access-cdvq9\") pod \"machine-api-operator-5694c8668f-twb6l\" (UID: \"5351ce01-8233-4a92-b15d-befd4e57b0d2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-twb6l" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.441456 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-gb44m" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.454774 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wlprx\" (UniqueName: \"kubernetes.io/projected/08c157ad-8d10-4284-a4e4-eb0b55a56542-kube-api-access-wlprx\") pod \"apiserver-76f77b778f-shvhw\" (UID: \"08c157ad-8d10-4284-a4e4-eb0b55a56542\") " pod="openshift-apiserver/apiserver-76f77b778f-shvhw" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.461107 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-xg47p" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.469093 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-l2kg2" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.475173 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dqzfl\" (UniqueName: \"kubernetes.io/projected/da8fd0f6-4f81-4bd2-8c8a-3d62ac19e209-kube-api-access-dqzfl\") pod \"openshift-controller-manager-operator-756b6f6bc6-nk6zl\" (UID: \"da8fd0f6-4f81-4bd2-8c8a-3d62ac19e209\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nk6zl" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.480878 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.493101 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n4ddz\" (UniqueName: \"kubernetes.io/projected/162f7ecc-6c53-4f08-955e-3ad0e5a7186e-kube-api-access-n4ddz\") pod \"route-controller-manager-6576b87f9c-cv8sw\" (UID: \"162f7ecc-6c53-4f08-955e-3ad0e5a7186e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-cv8sw" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.522268 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-shvhw" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.523684 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-tn4zj"] Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.530336 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qkplg\" (UniqueName: \"kubernetes.io/projected/afd3a02c-c392-4d05-8152-009157a52bf2-kube-api-access-qkplg\") pod \"cluster-image-registry-operator-dc59b4c8b-t7rc7\" (UID: \"afd3a02c-c392-4d05-8152-009157a52bf2\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t7rc7" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.536919 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8b74c1fc-0aa2-4c74-80a1-3743defdbe7a-bound-sa-token\") pod \"ingress-operator-5b745b69d9-6dlmm\" (UID: \"8b74c1fc-0aa2-4c74-80a1-3743defdbe7a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-6dlmm" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.555359 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-95jrj\" (UniqueName: \"kubernetes.io/projected/99e5c770-c7b9-496d-93c7-bd8151e1bd49-kube-api-access-95jrj\") pod \"dns-operator-744455d44c-s289l\" (UID: \"99e5c770-c7b9-496d-93c7-bd8151e1bd49\") " pod="openshift-dns-operator/dns-operator-744455d44c-s289l" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.572853 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cs482\" (UniqueName: \"kubernetes.io/projected/d73449ca-6268-455c-a1b5-79e0c6c79779-kube-api-access-cs482\") pod \"openshift-config-operator-7777fb866f-bd82s\" (UID: \"d73449ca-6268-455c-a1b5-79e0c6c79779\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-bd82s" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.575250 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t7rc7" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.577013 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-6dlmm" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.593111 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4257s\" (UniqueName: \"kubernetes.io/projected/326e56a0-cea9-40a2-ac71-cac257ebf902-kube-api-access-4257s\") pod \"migrator-59844c95c7-4w9xd\" (UID: \"326e56a0-cea9-40a2-ac71-cac257ebf902\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-4w9xd" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.598279 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-ghgwj"] Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.608249 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-bd82s" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.611959 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s9sl2\" (UniqueName: \"kubernetes.io/projected/ea217be5-30fd-43fe-8901-33ea461d4f48-kube-api-access-s9sl2\") pod \"catalog-operator-68c6474976-wcj44\" (UID: \"ea217be5-30fd-43fe-8901-33ea461d4f48\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wcj44" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.634075 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e483c02c-6538-4d83-89d0-8c474de6c1cb-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-mvk5j\" (UID: \"e483c02c-6538-4d83-89d0-8c474de6c1cb\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-mvk5j" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.648780 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-4w9xd" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.654795 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w82wc" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.657552 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hs92h\" (UniqueName: \"kubernetes.io/projected/0a7bb2e3-eb5e-43a5-88f5-df93dff3afce-kube-api-access-hs92h\") pod \"packageserver-d55dfcdfc-d7jmg\" (UID: \"0a7bb2e3-eb5e-43a5-88f5-df93dff3afce\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d7jmg" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.658812 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wcj44" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.668808 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j897w\" (UniqueName: \"kubernetes.io/projected/bfcd91d2-c8b0-4648-bcc5-207b11a54ece-kube-api-access-j897w\") pod \"router-default-5444994796-zs9d4\" (UID: \"bfcd91d2-c8b0-4648-bcc5-207b11a54ece\") " pod="openshift-ingress/router-default-5444994796-zs9d4" Nov 25 09:46:43 crc kubenswrapper[4769]: W1125 09:46:43.675594 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2e9574b9_23c7_46ec_98e4_8d4b4a6d97a4.slice/crio-256dcf706467a83aaeea17bb1c3a4e9874c360be99e24b31988a49e5e71bc3c0 WatchSource:0}: Error finding container 256dcf706467a83aaeea17bb1c3a4e9874c360be99e24b31988a49e5e71bc3c0: Status 404 returned error can't find the container with id 256dcf706467a83aaeea17bb1c3a4e9874c360be99e24b31988a49e5e71bc3c0 Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.684727 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-twb6l" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.689677 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qnknr\" (UniqueName: \"kubernetes.io/projected/c3329bde-d97b-47a5-96dc-a033e3c4bc8c-kube-api-access-qnknr\") pod \"control-plane-machine-set-operator-78cbb6b69f-qz6sr\" (UID: \"c3329bde-d97b-47a5-96dc-a033e3c4bc8c\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-qz6sr" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.705150 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-zjqhj" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.708157 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gtwq4\" (UniqueName: \"kubernetes.io/projected/d9929f0c-776d-4583-94ca-bd665b5d9783-kube-api-access-gtwq4\") pod \"marketplace-operator-79b997595-rdchw\" (UID: \"d9929f0c-776d-4583-94ca-bd665b5d9783\") " pod="openshift-marketplace/marketplace-operator-79b997595-rdchw" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.713992 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-rdchw" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.727058 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d7jmg" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.727355 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-g8wkd"] Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.734855 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-cv8sw" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.739250 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tw6s7\" (UniqueName: \"kubernetes.io/projected/c6742b80-a34d-4998-832f-29c88178b4ba-kube-api-access-tw6s7\") pod \"multus-admission-controller-857f4d67dd-skhr4\" (UID: \"c6742b80-a34d-4998-832f-29c88178b4ba\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-skhr4" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.750613 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vkrtm\" (UniqueName: \"kubernetes.io/projected/4ed994f7-d70c-4220-a142-f23cb0550377-kube-api-access-vkrtm\") pod \"etcd-operator-b45778765-qthpd\" (UID: \"4ed994f7-d70c-4220-a142-f23cb0550377\") " pod="openshift-etcd-operator/etcd-operator-b45778765-qthpd" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.753373 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nk6zl" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.773892 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-xg47p"] Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.774353 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-s289l" Nov 25 09:46:43 crc kubenswrapper[4769]: W1125 09:46:43.774451 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc7f5c01f_4820_4168_a19b_761a9e56650b.slice/crio-ef022254e864027f78efa0d4310e4ef57ff3e088537fb0a2a9e1ebe3a11d96ab WatchSource:0}: Error finding container ef022254e864027f78efa0d4310e4ef57ff3e088537fb0a2a9e1ebe3a11d96ab: Status 404 returned error can't find the container with id ef022254e864027f78efa0d4310e4ef57ff3e088537fb0a2a9e1ebe3a11d96ab Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.775106 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k5ghr\" (UniqueName: \"kubernetes.io/projected/d1f7b13d-54c0-4876-bccc-723388dee681-kube-api-access-k5ghr\") pod \"machine-config-operator-74547568cd-8ft75\" (UID: \"d1f7b13d-54c0-4876-bccc-723388dee681\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8ft75" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.793156 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x8g2g\" (UniqueName: \"kubernetes.io/projected/014bf10b-9df2-4cdf-b1e7-f852a56044cc-kube-api-access-x8g2g\") pod \"olm-operator-6b444d44fb-cnv9g\" (UID: \"014bf10b-9df2-4cdf-b1e7-f852a56044cc\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-cnv9g" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.811655 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k2v99\" (UniqueName: \"kubernetes.io/projected/b36eb87e-d18f-43f7-bd81-ffd76a8975d1-kube-api-access-k2v99\") pod \"package-server-manager-789f6589d5-w55h7\" (UID: \"b36eb87e-d18f-43f7-bd81-ffd76a8975d1\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-w55h7" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.852309 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"kube-api-access-kszgt\" (UniqueName: \"kubernetes.io/projected/0d5eb36d-4ded-4f1f-916d-140c5f814e27-kube-api-access-kszgt\") pod \"machine-config-controller-84d6567774-v4q28\" (UID: \"0d5eb36d-4ded-4f1f-916d-140c5f814e27\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-v4q28" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.888442 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8ft75" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.891421 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-p57pg"] Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.919473 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-shvhw"] Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.931035 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-mvk5j" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.940377 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-zs9d4" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.960204 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/fb4b7e91-71e7-4719-b0c5-35d132cf6115-registry-certificates\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.960258 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/72c6ce4d-c36a-4f0e-98d5-217e98231c9f-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-f8mb8\" (UID: \"72c6ce4d-c36a-4f0e-98d5-217e98231c9f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-f8mb8" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.960306 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/72c6ce4d-c36a-4f0e-98d5-217e98231c9f-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-f8mb8\" (UID: \"72c6ce4d-c36a-4f0e-98d5-217e98231c9f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-f8mb8" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.960437 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72c6ce4d-c36a-4f0e-98d5-217e98231c9f-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-f8mb8\" (UID: \"72c6ce4d-c36a-4f0e-98d5-217e98231c9f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-f8mb8" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.960533 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/fb4b7e91-71e7-4719-b0c5-35d132cf6115-registry-tls\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") 
" pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.960585 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/fb4b7e91-71e7-4719-b0c5-35d132cf6115-ca-trust-extracted\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.960612 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fb4b7e91-71e7-4719-b0c5-35d132cf6115-trusted-ca\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.960659 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8775d5bd-5cae-49f4-9582-a541dfd05c15-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-vp9pc\" (UID: \"8775d5bd-5cae-49f4-9582-a541dfd05c15\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-vp9pc" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.960716 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.960764 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hljhv\" (UniqueName: \"kubernetes.io/projected/fb4b7e91-71e7-4719-b0c5-35d132cf6115-kube-api-access-hljhv\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.960798 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1f4b3ab4-5512-4d2e-ac26-020b304c7cb4-config\") pod \"kube-apiserver-operator-766d6c64bb-82vd5\" (UID: \"1f4b3ab4-5512-4d2e-ac26-020b304c7cb4\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-82vd5" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.960822 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/fb4b7e91-71e7-4719-b0c5-35d132cf6115-installation-pull-secrets\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.960848 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1f4b3ab4-5512-4d2e-ac26-020b304c7cb4-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-82vd5\" (UID: 
\"1f4b3ab4-5512-4d2e-ac26-020b304c7cb4\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-82vd5" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.961099 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8775d5bd-5cae-49f4-9582-a541dfd05c15-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-vp9pc\" (UID: \"8775d5bd-5cae-49f4-9582-a541dfd05c15\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-vp9pc" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.961175 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lbwpq\" (UniqueName: \"kubernetes.io/projected/8775d5bd-5cae-49f4-9582-a541dfd05c15-kube-api-access-lbwpq\") pod \"kube-storage-version-migrator-operator-b67b599dd-vp9pc\" (UID: \"8775d5bd-5cae-49f4-9582-a541dfd05c15\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-vp9pc" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.961238 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fb4b7e91-71e7-4719-b0c5-35d132cf6115-bound-sa-token\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.961262 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1f4b3ab4-5512-4d2e-ac26-020b304c7cb4-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-82vd5\" (UID: \"1f4b3ab4-5512-4d2e-ac26-020b304c7cb4\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-82vd5" Nov 25 09:46:43 crc kubenswrapper[4769]: E1125 09:46:43.961327 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:44.461312823 +0000 UTC m=+153.046285136 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.967905 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-qz6sr" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.972716 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-qthpd" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.995631 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-v4q28" Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.998061 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-q8hkd"] Nov 25 09:46:43 crc kubenswrapper[4769]: I1125 09:46:43.998201 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-w55h7" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.020863 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-skhr4" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.035859 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-cnv9g" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.062710 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:46:44 crc kubenswrapper[4769]: E1125 09:46:44.062894 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:44.562864245 +0000 UTC m=+153.147836548 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.062996 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/66cdfd99-a795-4a7d-bdc4-fdb11a2bba75-serving-cert\") pod \"service-ca-operator-777779d784-g58mb\" (UID: \"66cdfd99-a795-4a7d-bdc4-fdb11a2bba75\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-g58mb" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.063427 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1f4b3ab4-5512-4d2e-ac26-020b304c7cb4-config\") pod \"kube-apiserver-operator-766d6c64bb-82vd5\" (UID: \"1f4b3ab4-5512-4d2e-ac26-020b304c7cb4\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-82vd5" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.063453 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q8blf\" (UniqueName: \"kubernetes.io/projected/92b7fa71-3918-4bc0-b4ad-93c5c19b88f5-kube-api-access-q8blf\") pod \"machine-config-server-g7vhl\" (UID: \"92b7fa71-3918-4bc0-b4ad-93c5c19b88f5\") " pod="openshift-machine-config-operator/machine-config-server-g7vhl" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.064078 4769 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5f4j9\" (UniqueName: \"kubernetes.io/projected/21dad565-8414-4796-b777-e63f27b9c666-kube-api-access-5f4j9\") pod \"collect-profiles-29401065-bbzl9\" (UID: \"21dad565-8414-4796-b777-e63f27b9c666\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-bbzl9" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.064225 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/fb4b7e91-71e7-4719-b0c5-35d132cf6115-installation-pull-secrets\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.064506 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1f4b3ab4-5512-4d2e-ac26-020b304c7cb4-config\") pod \"kube-apiserver-operator-766d6c64bb-82vd5\" (UID: \"1f4b3ab4-5512-4d2e-ac26-020b304c7cb4\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-82vd5" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.065301 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0183ff9c-f592-4fa2-9775-9385cf6c9499-cert\") pod \"ingress-canary-blk8z\" (UID: \"0183ff9c-f592-4fa2-9775-9385cf6c9499\") " pod="openshift-ingress-canary/ingress-canary-blk8z" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.065694 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/c7d0c499-f7f4-422c-9979-652cbf8c86bf-signing-key\") pod \"service-ca-9c57cc56f-wvj7p\" (UID: \"c7d0c499-f7f4-422c-9979-652cbf8c86bf\") " pod="openshift-service-ca/service-ca-9c57cc56f-wvj7p" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.065873 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1f4b3ab4-5512-4d2e-ac26-020b304c7cb4-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-82vd5\" (UID: \"1f4b3ab4-5512-4d2e-ac26-020b304c7cb4\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-82vd5" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.065946 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8775d5bd-5cae-49f4-9582-a541dfd05c15-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-vp9pc\" (UID: \"8775d5bd-5cae-49f4-9582-a541dfd05c15\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-vp9pc" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.066054 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lbwpq\" (UniqueName: \"kubernetes.io/projected/8775d5bd-5cae-49f4-9582-a541dfd05c15-kube-api-access-lbwpq\") pod \"kube-storage-version-migrator-operator-b67b599dd-vp9pc\" (UID: \"8775d5bd-5cae-49f4-9582-a541dfd05c15\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-vp9pc" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.066150 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fb4b7e91-71e7-4719-b0c5-35d132cf6115-bound-sa-token\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.066167 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1f4b3ab4-5512-4d2e-ac26-020b304c7cb4-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-82vd5\" (UID: \"1f4b3ab4-5512-4d2e-ac26-020b304c7cb4\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-82vd5" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.066185 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/92b7fa71-3918-4bc0-b4ad-93c5c19b88f5-node-bootstrap-token\") pod \"machine-config-server-g7vhl\" (UID: \"92b7fa71-3918-4bc0-b4ad-93c5c19b88f5\") " pod="openshift-machine-config-operator/machine-config-server-g7vhl" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.066244 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/92b7fa71-3918-4bc0-b4ad-93c5c19b88f5-certs\") pod \"machine-config-server-g7vhl\" (UID: \"92b7fa71-3918-4bc0-b4ad-93c5c19b88f5\") " pod="openshift-machine-config-operator/machine-config-server-g7vhl" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.066325 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tjzlp\" (UniqueName: \"kubernetes.io/projected/0183ff9c-f592-4fa2-9775-9385cf6c9499-kube-api-access-tjzlp\") pod \"ingress-canary-blk8z\" (UID: \"0183ff9c-f592-4fa2-9775-9385cf6c9499\") " pod="openshift-ingress-canary/ingress-canary-blk8z" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.066369 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/86a2de7a-5314-47ef-b827-92050023a677-plugins-dir\") pod \"csi-hostpathplugin-sx97n\" (UID: \"86a2de7a-5314-47ef-b827-92050023a677\") " pod="hostpath-provisioner/csi-hostpathplugin-sx97n" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.066385 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d8add56f-d9a7-4a9a-8f68-b7900166c27f-config-volume\") pod \"dns-default-hn7p6\" (UID: \"d8add56f-d9a7-4a9a-8f68-b7900166c27f\") " pod="openshift-dns/dns-default-hn7p6" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.066400 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/66cdfd99-a795-4a7d-bdc4-fdb11a2bba75-config\") pod \"service-ca-operator-777779d784-g58mb\" (UID: \"66cdfd99-a795-4a7d-bdc4-fdb11a2bba75\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-g58mb" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.066481 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/fb4b7e91-71e7-4719-b0c5-35d132cf6115-registry-certificates\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") 
" pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.066539 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2pw2c\" (UniqueName: \"kubernetes.io/projected/d8add56f-d9a7-4a9a-8f68-b7900166c27f-kube-api-access-2pw2c\") pod \"dns-default-hn7p6\" (UID: \"d8add56f-d9a7-4a9a-8f68-b7900166c27f\") " pod="openshift-dns/dns-default-hn7p6" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.066572 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/72c6ce4d-c36a-4f0e-98d5-217e98231c9f-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-f8mb8\" (UID: \"72c6ce4d-c36a-4f0e-98d5-217e98231c9f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-f8mb8" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.066635 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/21dad565-8414-4796-b777-e63f27b9c666-config-volume\") pod \"collect-profiles-29401065-bbzl9\" (UID: \"21dad565-8414-4796-b777-e63f27b9c666\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-bbzl9" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.066707 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/72c6ce4d-c36a-4f0e-98d5-217e98231c9f-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-f8mb8\" (UID: \"72c6ce4d-c36a-4f0e-98d5-217e98231c9f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-f8mb8" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.066725 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/86a2de7a-5314-47ef-b827-92050023a677-csi-data-dir\") pod \"csi-hostpathplugin-sx97n\" (UID: \"86a2de7a-5314-47ef-b827-92050023a677\") " pod="hostpath-provisioner/csi-hostpathplugin-sx97n" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.066784 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/86a2de7a-5314-47ef-b827-92050023a677-registration-dir\") pod \"csi-hostpathplugin-sx97n\" (UID: \"86a2de7a-5314-47ef-b827-92050023a677\") " pod="hostpath-provisioner/csi-hostpathplugin-sx97n" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.066864 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72c6ce4d-c36a-4f0e-98d5-217e98231c9f-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-f8mb8\" (UID: \"72c6ce4d-c36a-4f0e-98d5-217e98231c9f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-f8mb8" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.066901 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/21dad565-8414-4796-b777-e63f27b9c666-secret-volume\") pod \"collect-profiles-29401065-bbzl9\" (UID: \"21dad565-8414-4796-b777-e63f27b9c666\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-bbzl9" Nov 25 09:46:44 crc 
kubenswrapper[4769]: I1125 09:46:44.066928 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/fb4b7e91-71e7-4719-b0c5-35d132cf6115-registry-tls\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.066981 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8bx9q\" (UniqueName: \"kubernetes.io/projected/c7d0c499-f7f4-422c-9979-652cbf8c86bf-kube-api-access-8bx9q\") pod \"service-ca-9c57cc56f-wvj7p\" (UID: \"c7d0c499-f7f4-422c-9979-652cbf8c86bf\") " pod="openshift-service-ca/service-ca-9c57cc56f-wvj7p" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.068298 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72c6ce4d-c36a-4f0e-98d5-217e98231c9f-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-f8mb8\" (UID: \"72c6ce4d-c36a-4f0e-98d5-217e98231c9f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-f8mb8" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.070872 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/fb4b7e91-71e7-4719-b0c5-35d132cf6115-registry-certificates\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.072477 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8775d5bd-5cae-49f4-9582-a541dfd05c15-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-vp9pc\" (UID: \"8775d5bd-5cae-49f4-9582-a541dfd05c15\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-vp9pc" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.087319 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-l2kg2"] Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.087871 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/86a2de7a-5314-47ef-b827-92050023a677-mountpoint-dir\") pod \"csi-hostpathplugin-sx97n\" (UID: \"86a2de7a-5314-47ef-b827-92050023a677\") " pod="hostpath-provisioner/csi-hostpathplugin-sx97n" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.087986 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/fb4b7e91-71e7-4719-b0c5-35d132cf6115-ca-trust-extracted\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.088110 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fb4b7e91-71e7-4719-b0c5-35d132cf6115-trusted-ca\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:44 crc 
kubenswrapper[4769]: I1125 09:46:44.089647 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/86a2de7a-5314-47ef-b827-92050023a677-socket-dir\") pod \"csi-hostpathplugin-sx97n\" (UID: \"86a2de7a-5314-47ef-b827-92050023a677\") " pod="hostpath-provisioner/csi-hostpathplugin-sx97n" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.103541 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fb4b7e91-71e7-4719-b0c5-35d132cf6115-trusted-ca\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.104074 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/fb4b7e91-71e7-4719-b0c5-35d132cf6115-ca-trust-extracted\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.105693 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4tv7k\" (UniqueName: \"kubernetes.io/projected/66cdfd99-a795-4a7d-bdc4-fdb11a2bba75-kube-api-access-4tv7k\") pod \"service-ca-operator-777779d784-g58mb\" (UID: \"66cdfd99-a795-4a7d-bdc4-fdb11a2bba75\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-g58mb" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.109593 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/d8add56f-d9a7-4a9a-8f68-b7900166c27f-metrics-tls\") pod \"dns-default-hn7p6\" (UID: \"d8add56f-d9a7-4a9a-8f68-b7900166c27f\") " pod="openshift-dns/dns-default-hn7p6" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.110857 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/72c6ce4d-c36a-4f0e-98d5-217e98231c9f-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-f8mb8\" (UID: \"72c6ce4d-c36a-4f0e-98d5-217e98231c9f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-f8mb8" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.111330 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8775d5bd-5cae-49f4-9582-a541dfd05c15-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-vp9pc\" (UID: \"8775d5bd-5cae-49f4-9582-a541dfd05c15\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-vp9pc" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.111493 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w9wxg\" (UniqueName: \"kubernetes.io/projected/86a2de7a-5314-47ef-b827-92050023a677-kube-api-access-w9wxg\") pod \"csi-hostpathplugin-sx97n\" (UID: \"86a2de7a-5314-47ef-b827-92050023a677\") " pod="hostpath-provisioner/csi-hostpathplugin-sx97n" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.111780 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" 
(UniqueName: \"kubernetes.io/configmap/c7d0c499-f7f4-422c-9979-652cbf8c86bf-signing-cabundle\") pod \"service-ca-9c57cc56f-wvj7p\" (UID: \"c7d0c499-f7f4-422c-9979-652cbf8c86bf\") " pod="openshift-service-ca/service-ca-9c57cc56f-wvj7p" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.113362 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1f4b3ab4-5512-4d2e-ac26-020b304c7cb4-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-82vd5\" (UID: \"1f4b3ab4-5512-4d2e-ac26-020b304c7cb4\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-82vd5" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.113887 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:44 crc kubenswrapper[4769]: E1125 09:46:44.114511 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:44.614496586 +0000 UTC m=+153.199468899 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.114793 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hljhv\" (UniqueName: \"kubernetes.io/projected/fb4b7e91-71e7-4719-b0c5-35d132cf6115-kube-api-access-hljhv\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.123805 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/fb4b7e91-71e7-4719-b0c5-35d132cf6115-installation-pull-secrets\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.125427 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/fb4b7e91-71e7-4719-b0c5-35d132cf6115-registry-tls\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.128712 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8775d5bd-5cae-49f4-9582-a541dfd05c15-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-vp9pc\" (UID: \"8775d5bd-5cae-49f4-9582-a541dfd05c15\") " 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-vp9pc" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.130773 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-gb44m" event={"ID":"b5cfa364-a70a-4a62-a3ec-05285661a031","Type":"ContainerStarted","Data":"333c4342481030e7652ca5a9823ae12a259a6b5e16cfbde3550921b460102b39"} Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.130830 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-gb44m" event={"ID":"b5cfa364-a70a-4a62-a3ec-05285661a031","Type":"ContainerStarted","Data":"ecb8b5a153081603141a436b45a155d98d8b3467efb7060d192acafdea2ef984"} Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.131810 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-ghgwj" event={"ID":"2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4","Type":"ContainerStarted","Data":"bd938aa057ee176d2433481c20fe69980cf193c32fd46e400794ff30d0fca185"} Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.131836 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-ghgwj" event={"ID":"2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4","Type":"ContainerStarted","Data":"256dcf706467a83aaeea17bb1c3a4e9874c360be99e24b31988a49e5e71bc3c0"} Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.134946 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-g8wkd" event={"ID":"c7f5c01f-4820-4168-a19b-761a9e56650b","Type":"ContainerStarted","Data":"ef022254e864027f78efa0d4310e4ef57ff3e088537fb0a2a9e1ebe3a11d96ab"} Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.136366 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-shvhw" event={"ID":"08c157ad-8d10-4284-a4e4-eb0b55a56542","Type":"ContainerStarted","Data":"766f50a722f3a5e944371d0d45c33cc954f4a4ed403e48839246d85e3789cc1e"} Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.142098 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-tn4zj" event={"ID":"b5c763b0-7d68-4d32-871e-7e58ac32f03f","Type":"ContainerStarted","Data":"f569dc46565bf4f74217f645b342d6542ba6baf3339947702cedd3859ee36b46"} Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.142172 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-tn4zj" event={"ID":"b5c763b0-7d68-4d32-871e-7e58ac32f03f","Type":"ContainerStarted","Data":"67be65340955f4de869386e0c2090e2b11318af6b8adb7a09e1dac162c6dedd6"} Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.143321 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/72c6ce4d-c36a-4f0e-98d5-217e98231c9f-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-f8mb8\" (UID: \"72c6ce4d-c36a-4f0e-98d5-217e98231c9f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-f8mb8" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.143529 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-xg47p" 
event={"ID":"73d814ac-d3f7-4ed1-a37c-cfb7f988e88c","Type":"ContainerStarted","Data":"83858de479cd7612cb3512200ed671bced48b2750db841150c6e8e16c1d8be63"} Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.144275 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lbwpq\" (UniqueName: \"kubernetes.io/projected/8775d5bd-5cae-49f4-9582-a541dfd05c15-kube-api-access-lbwpq\") pod \"kube-storage-version-migrator-operator-b67b599dd-vp9pc\" (UID: \"8775d5bd-5cae-49f4-9582-a541dfd05c15\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-vp9pc" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.151991 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1f4b3ab4-5512-4d2e-ac26-020b304c7cb4-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-82vd5\" (UID: \"1f4b3ab4-5512-4d2e-ac26-020b304c7cb4\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-82vd5" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.180146 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fb4b7e91-71e7-4719-b0c5-35d132cf6115-bound-sa-token\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.200696 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-f8mb8" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.206681 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-vp9pc" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.217086 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.217463 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/66cdfd99-a795-4a7d-bdc4-fdb11a2bba75-serving-cert\") pod \"service-ca-operator-777779d784-g58mb\" (UID: \"66cdfd99-a795-4a7d-bdc4-fdb11a2bba75\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-g58mb" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.217497 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5f4j9\" (UniqueName: \"kubernetes.io/projected/21dad565-8414-4796-b777-e63f27b9c666-kube-api-access-5f4j9\") pod \"collect-profiles-29401065-bbzl9\" (UID: \"21dad565-8414-4796-b777-e63f27b9c666\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-bbzl9" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.217528 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q8blf\" (UniqueName: \"kubernetes.io/projected/92b7fa71-3918-4bc0-b4ad-93c5c19b88f5-kube-api-access-q8blf\") pod \"machine-config-server-g7vhl\" (UID: \"92b7fa71-3918-4bc0-b4ad-93c5c19b88f5\") " pod="openshift-machine-config-operator/machine-config-server-g7vhl" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.217556 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0183ff9c-f592-4fa2-9775-9385cf6c9499-cert\") pod \"ingress-canary-blk8z\" (UID: \"0183ff9c-f592-4fa2-9775-9385cf6c9499\") " pod="openshift-ingress-canary/ingress-canary-blk8z" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.217583 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/c7d0c499-f7f4-422c-9979-652cbf8c86bf-signing-key\") pod \"service-ca-9c57cc56f-wvj7p\" (UID: \"c7d0c499-f7f4-422c-9979-652cbf8c86bf\") " pod="openshift-service-ca/service-ca-9c57cc56f-wvj7p" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.217633 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/92b7fa71-3918-4bc0-b4ad-93c5c19b88f5-node-bootstrap-token\") pod \"machine-config-server-g7vhl\" (UID: \"92b7fa71-3918-4bc0-b4ad-93c5c19b88f5\") " pod="openshift-machine-config-operator/machine-config-server-g7vhl" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.217655 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/92b7fa71-3918-4bc0-b4ad-93c5c19b88f5-certs\") pod \"machine-config-server-g7vhl\" (UID: \"92b7fa71-3918-4bc0-b4ad-93c5c19b88f5\") " pod="openshift-machine-config-operator/machine-config-server-g7vhl" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.217686 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tjzlp\" (UniqueName: 
\"kubernetes.io/projected/0183ff9c-f592-4fa2-9775-9385cf6c9499-kube-api-access-tjzlp\") pod \"ingress-canary-blk8z\" (UID: \"0183ff9c-f592-4fa2-9775-9385cf6c9499\") " pod="openshift-ingress-canary/ingress-canary-blk8z" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.217712 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/86a2de7a-5314-47ef-b827-92050023a677-plugins-dir\") pod \"csi-hostpathplugin-sx97n\" (UID: \"86a2de7a-5314-47ef-b827-92050023a677\") " pod="hostpath-provisioner/csi-hostpathplugin-sx97n" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.217735 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d8add56f-d9a7-4a9a-8f68-b7900166c27f-config-volume\") pod \"dns-default-hn7p6\" (UID: \"d8add56f-d9a7-4a9a-8f68-b7900166c27f\") " pod="openshift-dns/dns-default-hn7p6" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.217760 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/66cdfd99-a795-4a7d-bdc4-fdb11a2bba75-config\") pod \"service-ca-operator-777779d784-g58mb\" (UID: \"66cdfd99-a795-4a7d-bdc4-fdb11a2bba75\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-g58mb" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.217817 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2pw2c\" (UniqueName: \"kubernetes.io/projected/d8add56f-d9a7-4a9a-8f68-b7900166c27f-kube-api-access-2pw2c\") pod \"dns-default-hn7p6\" (UID: \"d8add56f-d9a7-4a9a-8f68-b7900166c27f\") " pod="openshift-dns/dns-default-hn7p6" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.217844 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/21dad565-8414-4796-b777-e63f27b9c666-config-volume\") pod \"collect-profiles-29401065-bbzl9\" (UID: \"21dad565-8414-4796-b777-e63f27b9c666\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-bbzl9" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.217883 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/86a2de7a-5314-47ef-b827-92050023a677-csi-data-dir\") pod \"csi-hostpathplugin-sx97n\" (UID: \"86a2de7a-5314-47ef-b827-92050023a677\") " pod="hostpath-provisioner/csi-hostpathplugin-sx97n" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.217909 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/86a2de7a-5314-47ef-b827-92050023a677-registration-dir\") pod \"csi-hostpathplugin-sx97n\" (UID: \"86a2de7a-5314-47ef-b827-92050023a677\") " pod="hostpath-provisioner/csi-hostpathplugin-sx97n" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.217940 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/21dad565-8414-4796-b777-e63f27b9c666-secret-volume\") pod \"collect-profiles-29401065-bbzl9\" (UID: \"21dad565-8414-4796-b777-e63f27b9c666\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-bbzl9" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.218284 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-8bx9q\" (UniqueName: \"kubernetes.io/projected/c7d0c499-f7f4-422c-9979-652cbf8c86bf-kube-api-access-8bx9q\") pod \"service-ca-9c57cc56f-wvj7p\" (UID: \"c7d0c499-f7f4-422c-9979-652cbf8c86bf\") " pod="openshift-service-ca/service-ca-9c57cc56f-wvj7p" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.218320 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/86a2de7a-5314-47ef-b827-92050023a677-mountpoint-dir\") pod \"csi-hostpathplugin-sx97n\" (UID: \"86a2de7a-5314-47ef-b827-92050023a677\") " pod="hostpath-provisioner/csi-hostpathplugin-sx97n" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.218362 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/86a2de7a-5314-47ef-b827-92050023a677-socket-dir\") pod \"csi-hostpathplugin-sx97n\" (UID: \"86a2de7a-5314-47ef-b827-92050023a677\") " pod="hostpath-provisioner/csi-hostpathplugin-sx97n" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.218381 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4tv7k\" (UniqueName: \"kubernetes.io/projected/66cdfd99-a795-4a7d-bdc4-fdb11a2bba75-kube-api-access-4tv7k\") pod \"service-ca-operator-777779d784-g58mb\" (UID: \"66cdfd99-a795-4a7d-bdc4-fdb11a2bba75\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-g58mb" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.218418 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/d8add56f-d9a7-4a9a-8f68-b7900166c27f-metrics-tls\") pod \"dns-default-hn7p6\" (UID: \"d8add56f-d9a7-4a9a-8f68-b7900166c27f\") " pod="openshift-dns/dns-default-hn7p6" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.218453 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w9wxg\" (UniqueName: \"kubernetes.io/projected/86a2de7a-5314-47ef-b827-92050023a677-kube-api-access-w9wxg\") pod \"csi-hostpathplugin-sx97n\" (UID: \"86a2de7a-5314-47ef-b827-92050023a677\") " pod="hostpath-provisioner/csi-hostpathplugin-sx97n" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.218477 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/c7d0c499-f7f4-422c-9979-652cbf8c86bf-signing-cabundle\") pod \"service-ca-9c57cc56f-wvj7p\" (UID: \"c7d0c499-f7f4-422c-9979-652cbf8c86bf\") " pod="openshift-service-ca/service-ca-9c57cc56f-wvj7p" Nov 25 09:46:44 crc kubenswrapper[4769]: E1125 09:46:44.219511 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:44.719485657 +0000 UTC m=+153.304457970 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.219764 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/86a2de7a-5314-47ef-b827-92050023a677-csi-data-dir\") pod \"csi-hostpathplugin-sx97n\" (UID: \"86a2de7a-5314-47ef-b827-92050023a677\") " pod="hostpath-provisioner/csi-hostpathplugin-sx97n" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.220421 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/86a2de7a-5314-47ef-b827-92050023a677-mountpoint-dir\") pod \"csi-hostpathplugin-sx97n\" (UID: \"86a2de7a-5314-47ef-b827-92050023a677\") " pod="hostpath-provisioner/csi-hostpathplugin-sx97n" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.220493 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/86a2de7a-5314-47ef-b827-92050023a677-plugins-dir\") pod \"csi-hostpathplugin-sx97n\" (UID: \"86a2de7a-5314-47ef-b827-92050023a677\") " pod="hostpath-provisioner/csi-hostpathplugin-sx97n" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.220539 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/c7d0c499-f7f4-422c-9979-652cbf8c86bf-signing-cabundle\") pod \"service-ca-9c57cc56f-wvj7p\" (UID: \"c7d0c499-f7f4-422c-9979-652cbf8c86bf\") " pod="openshift-service-ca/service-ca-9c57cc56f-wvj7p" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.220810 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/86a2de7a-5314-47ef-b827-92050023a677-socket-dir\") pod \"csi-hostpathplugin-sx97n\" (UID: \"86a2de7a-5314-47ef-b827-92050023a677\") " pod="hostpath-provisioner/csi-hostpathplugin-sx97n" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.224763 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d8add56f-d9a7-4a9a-8f68-b7900166c27f-config-volume\") pod \"dns-default-hn7p6\" (UID: \"d8add56f-d9a7-4a9a-8f68-b7900166c27f\") " pod="openshift-dns/dns-default-hn7p6" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.225798 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/21dad565-8414-4796-b777-e63f27b9c666-config-volume\") pod \"collect-profiles-29401065-bbzl9\" (UID: \"21dad565-8414-4796-b777-e63f27b9c666\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-bbzl9" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.228670 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/66cdfd99-a795-4a7d-bdc4-fdb11a2bba75-config\") pod \"service-ca-operator-777779d784-g58mb\" (UID: \"66cdfd99-a795-4a7d-bdc4-fdb11a2bba75\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-g58mb" Nov 25 09:46:44 crc 
kubenswrapper[4769]: I1125 09:46:44.229514 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/86a2de7a-5314-47ef-b827-92050023a677-registration-dir\") pod \"csi-hostpathplugin-sx97n\" (UID: \"86a2de7a-5314-47ef-b827-92050023a677\") " pod="hostpath-provisioner/csi-hostpathplugin-sx97n" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.232490 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/66cdfd99-a795-4a7d-bdc4-fdb11a2bba75-serving-cert\") pod \"service-ca-operator-777779d784-g58mb\" (UID: \"66cdfd99-a795-4a7d-bdc4-fdb11a2bba75\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-g58mb" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.234124 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/d8add56f-d9a7-4a9a-8f68-b7900166c27f-metrics-tls\") pod \"dns-default-hn7p6\" (UID: \"d8add56f-d9a7-4a9a-8f68-b7900166c27f\") " pod="openshift-dns/dns-default-hn7p6" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.246473 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/c7d0c499-f7f4-422c-9979-652cbf8c86bf-signing-key\") pod \"service-ca-9c57cc56f-wvj7p\" (UID: \"c7d0c499-f7f4-422c-9979-652cbf8c86bf\") " pod="openshift-service-ca/service-ca-9c57cc56f-wvj7p" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.249688 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/21dad565-8414-4796-b777-e63f27b9c666-secret-volume\") pod \"collect-profiles-29401065-bbzl9\" (UID: \"21dad565-8414-4796-b777-e63f27b9c666\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-bbzl9" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.256107 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/92b7fa71-3918-4bc0-b4ad-93c5c19b88f5-node-bootstrap-token\") pod \"machine-config-server-g7vhl\" (UID: \"92b7fa71-3918-4bc0-b4ad-93c5c19b88f5\") " pod="openshift-machine-config-operator/machine-config-server-g7vhl" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.258785 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/92b7fa71-3918-4bc0-b4ad-93c5c19b88f5-certs\") pod \"machine-config-server-g7vhl\" (UID: \"92b7fa71-3918-4bc0-b4ad-93c5c19b88f5\") " pod="openshift-machine-config-operator/machine-config-server-g7vhl" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.263121 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0183ff9c-f592-4fa2-9775-9385cf6c9499-cert\") pod \"ingress-canary-blk8z\" (UID: \"0183ff9c-f592-4fa2-9775-9385cf6c9499\") " pod="openshift-ingress-canary/ingress-canary-blk8z" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.263377 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8bx9q\" (UniqueName: \"kubernetes.io/projected/c7d0c499-f7f4-422c-9979-652cbf8c86bf-kube-api-access-8bx9q\") pod \"service-ca-9c57cc56f-wvj7p\" (UID: \"c7d0c499-f7f4-422c-9979-652cbf8c86bf\") " pod="openshift-service-ca/service-ca-9c57cc56f-wvj7p" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.268635 4769 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hljhv\" (UniqueName: \"kubernetes.io/projected/fb4b7e91-71e7-4719-b0c5-35d132cf6115-kube-api-access-hljhv\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.285852 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5f4j9\" (UniqueName: \"kubernetes.io/projected/21dad565-8414-4796-b777-e63f27b9c666-kube-api-access-5f4j9\") pod \"collect-profiles-29401065-bbzl9\" (UID: \"21dad565-8414-4796-b777-e63f27b9c666\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-bbzl9" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.301819 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-82vd5" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.306993 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q8blf\" (UniqueName: \"kubernetes.io/projected/92b7fa71-3918-4bc0-b4ad-93c5c19b88f5-kube-api-access-q8blf\") pod \"machine-config-server-g7vhl\" (UID: \"92b7fa71-3918-4bc0-b4ad-93c5c19b88f5\") " pod="openshift-machine-config-operator/machine-config-server-g7vhl" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.320127 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:44 crc kubenswrapper[4769]: E1125 09:46:44.320707 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:44.820685584 +0000 UTC m=+153.405657897 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.323626 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4tv7k\" (UniqueName: \"kubernetes.io/projected/66cdfd99-a795-4a7d-bdc4-fdb11a2bba75-kube-api-access-4tv7k\") pod \"service-ca-operator-777779d784-g58mb\" (UID: \"66cdfd99-a795-4a7d-bdc4-fdb11a2bba75\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-g58mb" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.330486 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w9wxg\" (UniqueName: \"kubernetes.io/projected/86a2de7a-5314-47ef-b827-92050023a677-kube-api-access-w9wxg\") pod \"csi-hostpathplugin-sx97n\" (UID: \"86a2de7a-5314-47ef-b827-92050023a677\") " pod="hostpath-provisioner/csi-hostpathplugin-sx97n" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.341312 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-g58mb" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.348641 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-wvj7p" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.355327 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-bbzl9" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.357023 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2pw2c\" (UniqueName: \"kubernetes.io/projected/d8add56f-d9a7-4a9a-8f68-b7900166c27f-kube-api-access-2pw2c\") pod \"dns-default-hn7p6\" (UID: \"d8add56f-d9a7-4a9a-8f68-b7900166c27f\") " pod="openshift-dns/dns-default-hn7p6" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.366155 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-g7vhl" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.380832 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tjzlp\" (UniqueName: \"kubernetes.io/projected/0183ff9c-f592-4fa2-9775-9385cf6c9499-kube-api-access-tjzlp\") pod \"ingress-canary-blk8z\" (UID: \"0183ff9c-f592-4fa2-9775-9385cf6c9499\") " pod="openshift-ingress-canary/ingress-canary-blk8z" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.410246 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-hn7p6" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.418876 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-sx97n" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.422292 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:46:44 crc kubenswrapper[4769]: E1125 09:46:44.423257 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:44.923230976 +0000 UTC m=+153.508203289 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.468728 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t7rc7"] Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.476159 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-bd82s"] Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.524744 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:44 crc kubenswrapper[4769]: E1125 09:46:44.525102 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:45.025089121 +0000 UTC m=+153.610061434 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.626081 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:46:44 crc kubenswrapper[4769]: E1125 09:46:44.626397 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:45.126377052 +0000 UTC m=+153.711349365 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.674777 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-blk8z" Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.727982 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:44 crc kubenswrapper[4769]: E1125 09:46:44.728596 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:45.22856579 +0000 UTC m=+153.813538103 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.759831 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-6dlmm"] Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.829683 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:46:44 crc kubenswrapper[4769]: E1125 09:46:44.829917 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:45.329866602 +0000 UTC m=+153.914838915 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.830041 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:44 crc kubenswrapper[4769]: E1125 09:46:44.830451 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:45.330425375 +0000 UTC m=+153.915397688 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:44 crc kubenswrapper[4769]: I1125 09:46:44.934634 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:46:44 crc kubenswrapper[4769]: E1125 09:46:44.936766 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:45.435433756 +0000 UTC m=+154.020406069 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.036932 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:45 crc kubenswrapper[4769]: E1125 09:46:45.037873 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:45.537851603 +0000 UTC m=+154.122823936 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.140440 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:46:45 crc kubenswrapper[4769]: E1125 09:46:45.140704 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:45.640677176 +0000 UTC m=+154.225649489 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.141285 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:45 crc kubenswrapper[4769]: E1125 09:46:45.141688 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:45.641674347 +0000 UTC m=+154.226646660 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.179919 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-l2kg2" event={"ID":"01b73250-0ba8-4c2d-9ada-dacc89a70a7d","Type":"ContainerStarted","Data":"cbcd09f9eb2291016b505544a882629b83b7630a6e3e54802c28ba0f2ad01a92"} Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.179988 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-l2kg2" event={"ID":"01b73250-0ba8-4c2d-9ada-dacc89a70a7d","Type":"ContainerStarted","Data":"5c829f45cda995c2d2ab536adfa41cf314e67a299d3bd9efb5d0ab8b58f18c4d"} Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.199913 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" event={"ID":"00b22ed8-e9b5-4173-8574-b06254cd0965","Type":"ContainerStarted","Data":"6129d3e09c7d70decc52aea7155296f097686359b06532a0df19c51ec9f3e801"} Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.213005 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-gb44m" event={"ID":"b5cfa364-a70a-4a62-a3ec-05285661a031","Type":"ContainerStarted","Data":"4715dade37bb578db81cd479366ffde4281c82befbc5a6bd1cb5d0d66da46263"} Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.225102 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t7rc7" event={"ID":"afd3a02c-c392-4d05-8152-009157a52bf2","Type":"ContainerStarted","Data":"b8c21e21a446cedaaa16c07dcc5e93b9b8a58c3ec4c026daedabb074a6f3cfb5"} Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.236827 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-8ft75"] Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.239251 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-p57pg" event={"ID":"6c5b8b29-e065-4587-88d6-cd93b56a1657","Type":"ContainerStarted","Data":"a3fe74c2baec975c7cdacfb8b563fcb59fd85d23751cb44aa19a38c5dc07c646"} Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.239306 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-p57pg" event={"ID":"6c5b8b29-e065-4587-88d6-cd93b56a1657","Type":"ContainerStarted","Data":"4e1af4d7d11a018fc166ae36ebe31bddabbfe87bab6562e74f9a6ad15a129b03"} Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.241899 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:46:45 crc kubenswrapper[4769]: E1125 09:46:45.242236 4769 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:45.742207678 +0000 UTC m=+154.327179991 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.242339 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:45 crc kubenswrapper[4769]: E1125 09:46:45.242993 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:45.742983419 +0000 UTC m=+154.327955732 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.249179 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-6dlmm" event={"ID":"8b74c1fc-0aa2-4c74-80a1-3743defdbe7a","Type":"ContainerStarted","Data":"bb46004e19e163b1a99681617f0bc0d94a8cdeb272100d5cf0991bf1b2517649"} Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.250748 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-xg47p" event={"ID":"73d814ac-d3f7-4ed1-a37c-cfb7f988e88c","Type":"ContainerStarted","Data":"04378efca6c642d109fea4cbac953feba01f901a21c32a818aee53662fffca07"} Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.251855 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-xg47p" Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.259055 4769 generic.go:334] "Generic (PLEG): container finished" podID="08c157ad-8d10-4284-a4e4-eb0b55a56542" containerID="e1282af1d7b3cb9c4e7f2c2c9785c0e7555294cca2d006daf438475ec1a4c119" exitCode=0 Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.259160 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-shvhw" event={"ID":"08c157ad-8d10-4284-a4e4-eb0b55a56542","Type":"ContainerDied","Data":"e1282af1d7b3cb9c4e7f2c2c9785c0e7555294cca2d006daf438475ec1a4c119"} Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.261378 4769 kubelet.go:2428] "SyncLoop UPDATE" 
source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-rdchw"] Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.272887 4769 patch_prober.go:28] interesting pod/console-operator-58897d9998-xg47p container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.12:8443/readyz\": dial tcp 10.217.0.12:8443: connect: connection refused" start-of-body= Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.272940 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-xg47p" podUID="73d814ac-d3f7-4ed1-a37c-cfb7f988e88c" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.12:8443/readyz\": dial tcp 10.217.0.12:8443: connect: connection refused" Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.277941 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-4w9xd"] Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.279886 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-g7vhl" event={"ID":"92b7fa71-3918-4bc0-b4ad-93c5c19b88f5","Type":"ContainerStarted","Data":"3a15ab7b522cb0b208ee9c90e1d62be04775d0078288d9b39d1a6b597385879d"} Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.279938 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-g7vhl" event={"ID":"92b7fa71-3918-4bc0-b4ad-93c5c19b88f5","Type":"ContainerStarted","Data":"a6379f6ce972eb3257e1bca76752d403716933ebccf92cf550640954f2f1afba"} Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.283039 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-g8wkd" event={"ID":"c7f5c01f-4820-4168-a19b-761a9e56650b","Type":"ContainerStarted","Data":"63e9fdcbe9c1df6e2351547c3ccc92436e19a4020705a52702a5449948b30ed9"} Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.287955 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-zs9d4" event={"ID":"bfcd91d2-c8b0-4648-bcc5-207b11a54ece","Type":"ContainerStarted","Data":"eda574b25e31d1916c353b7bec8e9dc54441daf84680ca46a356c8961c3cc184"} Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.288013 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-zs9d4" event={"ID":"bfcd91d2-c8b0-4648-bcc5-207b11a54ece","Type":"ContainerStarted","Data":"b5dd2f97b2abdd357f3a72afeffd13d1ecee8b2aecf65e8afff1d593092b8c64"} Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.299408 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-zjqhj"] Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.301494 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-v4q28"] Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.301771 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-bd82s" event={"ID":"d73449ca-6268-455c-a1b5-79e0c6c79779","Type":"ContainerStarted","Data":"31049a129bb69ffd76494ef85482816da035e730b95fcf01c27949c788634042"} Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.323263 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d7jmg"] Nov 25 09:46:45 crc kubenswrapper[4769]: W1125 09:46:45.323788 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0a7bb2e3_eb5e_43a5_88f5_df93dff3afce.slice/crio-6866825894bd33c980220b127b4e0503be9fda2d8e4eb2a6906515167984b6df WatchSource:0}: Error finding container 6866825894bd33c980220b127b4e0503be9fda2d8e4eb2a6906515167984b6df: Status 404 returned error can't find the container with id 6866825894bd33c980220b127b4e0503be9fda2d8e4eb2a6906515167984b6df Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.325709 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wcj44"] Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.329627 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-twb6l"] Nov 25 09:46:45 crc kubenswrapper[4769]: W1125 09:46:45.343297 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod446806b9_7941_43d7_885c_61c1d577811d.slice/crio-ac429310bee3eec5cf21a61ea1b7416c985b29d6fa94570cf51edbd939ff3257 WatchSource:0}: Error finding container ac429310bee3eec5cf21a61ea1b7416c985b29d6fa94570cf51edbd939ff3257: Status 404 returned error can't find the container with id ac429310bee3eec5cf21a61ea1b7416c985b29d6fa94570cf51edbd939ff3257 Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.343541 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:46:45 crc kubenswrapper[4769]: E1125 09:46:45.343805 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:45.843781611 +0000 UTC m=+154.428753934 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:45 crc kubenswrapper[4769]: E1125 09:46:45.344538 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:45.84451888 +0000 UTC m=+154.429491193 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.343929 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.449670 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:46:45 crc kubenswrapper[4769]: E1125 09:46:45.450175 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:45.950151247 +0000 UTC m=+154.535123560 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.450263 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:45 crc kubenswrapper[4769]: E1125 09:46:45.450599 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:45.950589075 +0000 UTC m=+154.535561388 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.488098 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-cv8sw"] Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.491050 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-vp9pc"] Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.525054 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-qthpd"] Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.535615 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-qz6sr"] Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.547051 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-w55h7"] Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.560170 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:46:45 crc kubenswrapper[4769]: E1125 09:46:45.560413 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:46.060366508 +0000 UTC m=+154.645338821 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.560521 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:45 crc kubenswrapper[4769]: E1125 09:46:45.560925 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:46.06090171 +0000 UTC m=+154.645874023 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.578469 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-s289l"] Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.595883 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nk6zl"] Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.598222 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-mvk5j"] Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.607327 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-skhr4"] Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.626759 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-w82wc"] Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.629936 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-cnv9g"] Nov 25 09:46:45 crc kubenswrapper[4769]: W1125 09:46:45.647093 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod162f7ecc_6c53_4f08_955e_3ad0e5a7186e.slice/crio-aac22c91662c7da6d2302a4f752c7333a805bd4ef7791ad2d39e16f5115a95da WatchSource:0}: Error finding container aac22c91662c7da6d2302a4f752c7333a805bd4ef7791ad2d39e16f5115a95da: Status 404 returned error can't find the container with id aac22c91662c7da6d2302a4f752c7333a805bd4ef7791ad2d39e16f5115a95da Nov 25 09:46:45 crc kubenswrapper[4769]: W1125 09:46:45.661268 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podda8fd0f6_4f81_4bd2_8c8a_3d62ac19e209.slice/crio-8654971f6b519577be10890ebfebc4bb55bdecfca82e1301c5704413d1642b1d WatchSource:0}: Error finding container 8654971f6b519577be10890ebfebc4bb55bdecfca82e1301c5704413d1642b1d: Status 404 returned error can't find the container with id 8654971f6b519577be10890ebfebc4bb55bdecfca82e1301c5704413d1642b1d Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.661615 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:46:45 crc kubenswrapper[4769]: E1125 09:46:45.661859 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:46.161835396 +0000 UTC m=+154.746807709 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.661905 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:45 crc kubenswrapper[4769]: E1125 09:46:45.662507 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:46.162489182 +0000 UTC m=+154.747461495 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:45 crc kubenswrapper[4769]: W1125 09:46:45.672240 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc6742b80_a34d_4998_832f_29c88178b4ba.slice/crio-9e36d43174f042a1e81787bad9de6f8cd4021648d1d180e4577d27a0aba67e52 WatchSource:0}: Error finding container 9e36d43174f042a1e81787bad9de6f8cd4021648d1d180e4577d27a0aba67e52: Status 404 returned error can't find the container with id 9e36d43174f042a1e81787bad9de6f8cd4021648d1d180e4577d27a0aba67e52 Nov 25 09:46:45 crc kubenswrapper[4769]: W1125 09:46:45.674734 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod014bf10b_9df2_4cdf_b1e7_f852a56044cc.slice/crio-d45798a783631f0f4f62c089f2683571bc2c914a2ad58818386a219d602a5ebf WatchSource:0}: Error finding container d45798a783631f0f4f62c089f2683571bc2c914a2ad58818386a219d602a5ebf: Status 404 returned error can't find the container with id d45798a783631f0f4f62c089f2683571bc2c914a2ad58818386a219d602a5ebf Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.722200 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-82vd5"] Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.724375 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-blk8z"] Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.736906 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-g58mb"] Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.761682 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-hn7p6"] Nov 25 09:46:45 crc 
kubenswrapper[4769]: I1125 09:46:45.763353 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:46:45 crc kubenswrapper[4769]: E1125 09:46:45.763578 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:46.263557155 +0000 UTC m=+154.848529468 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.763617 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:45 crc kubenswrapper[4769]: E1125 09:46:45.763954 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:46.263947231 +0000 UTC m=+154.848919544 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.767513 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-sx97n"] Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.782235 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-zs9d4" podStartSLOduration=128.782216407 podStartE2EDuration="2m8.782216407s" podCreationTimestamp="2025-11-25 09:44:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:45.777480896 +0000 UTC m=+154.362453229" watchObservedRunningTime="2025-11-25 09:46:45.782216407 +0000 UTC m=+154.367188720" Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.786742 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-wvj7p"] Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.807899 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-xg47p" podStartSLOduration=128.807879781 podStartE2EDuration="2m8.807879781s" podCreationTimestamp="2025-11-25 09:44:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:45.807405792 +0000 UTC m=+154.392378145" watchObservedRunningTime="2025-11-25 09:46:45.807879781 +0000 UTC m=+154.392852094" Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.829261 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-f8mb8"] Nov 25 09:46:45 crc kubenswrapper[4769]: W1125 09:46:45.832970 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod66cdfd99_a795_4a7d_bdc4_fdb11a2bba75.slice/crio-42956546cf96245c8a6165b432a42f6d6e40d199e945c11634b74b703e18f6b9 WatchSource:0}: Error finding container 42956546cf96245c8a6165b432a42f6d6e40d199e945c11634b74b703e18f6b9: Status 404 returned error can't find the container with id 42956546cf96245c8a6165b432a42f6d6e40d199e945c11634b74b703e18f6b9 Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.848826 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-g8wkd" podStartSLOduration=128.84880638 podStartE2EDuration="2m8.84880638s" podCreationTimestamp="2025-11-25 09:44:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:45.84508258 +0000 UTC m=+154.430054893" watchObservedRunningTime="2025-11-25 09:46:45.84880638 +0000 UTC m=+154.433778693" Nov 25 09:46:45 crc kubenswrapper[4769]: W1125 09:46:45.852436 4769 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod86a2de7a_5314_47ef_b827_92050023a677.slice/crio-66275f690816083f07e8fe348913e44b0ff65a04cd22c49e8bf8f8f27d6a4e9f WatchSource:0}: Error finding container 66275f690816083f07e8fe348913e44b0ff65a04cd22c49e8bf8f8f27d6a4e9f: Status 404 returned error can't find the container with id 66275f690816083f07e8fe348913e44b0ff65a04cd22c49e8bf8f8f27d6a4e9f Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.864472 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:46:45 crc kubenswrapper[4769]: E1125 09:46:45.864948 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:46.36493153 +0000 UTC m=+154.949903843 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.889126 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401065-bbzl9"] Nov 25 09:46:45 crc kubenswrapper[4769]: W1125 09:46:45.902112 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc7d0c499_f7f4_422c_9979_652cbf8c86bf.slice/crio-e82d2eeaf3c54a3f053204aaf64c3b3422acebb8a1eb253dca8d88541ee22d55 WatchSource:0}: Error finding container e82d2eeaf3c54a3f053204aaf64c3b3422acebb8a1eb253dca8d88541ee22d55: Status 404 returned error can't find the container with id e82d2eeaf3c54a3f053204aaf64c3b3422acebb8a1eb253dca8d88541ee22d55 Nov 25 09:46:45 crc kubenswrapper[4769]: W1125 09:46:45.908421 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod72c6ce4d_c36a_4f0e_98d5_217e98231c9f.slice/crio-e2376b3371b4403cf5e91f37113ab61a625942ab29327c6590c130cccce23bef WatchSource:0}: Error finding container e2376b3371b4403cf5e91f37113ab61a625942ab29327c6590c130cccce23bef: Status 404 returned error can't find the container with id e2376b3371b4403cf5e91f37113ab61a625942ab29327c6590c130cccce23bef Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.931777 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-g7vhl" podStartSLOduration=4.931759673 podStartE2EDuration="4.931759673s" podCreationTimestamp="2025-11-25 09:46:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:45.930776933 +0000 UTC m=+154.515749246" watchObservedRunningTime="2025-11-25 09:46:45.931759673 +0000 UTC m=+154.516731986" Nov 25 09:46:45 crc 
kubenswrapper[4769]: I1125 09:46:45.940536 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-zs9d4" Nov 25 09:46:45 crc kubenswrapper[4769]: W1125 09:46:45.940182 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod21dad565_8414_4796_b777_e63f27b9c666.slice/crio-274abf9130bce1e994400be936cc1ff992fc76eb1849dfa56817acb8e9cd930e WatchSource:0}: Error finding container 274abf9130bce1e994400be936cc1ff992fc76eb1849dfa56817acb8e9cd930e: Status 404 returned error can't find the container with id 274abf9130bce1e994400be936cc1ff992fc76eb1849dfa56817acb8e9cd930e Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.959239 4769 patch_prober.go:28] interesting pod/router-default-5444994796-zs9d4 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 09:46:45 crc kubenswrapper[4769]: [-]has-synced failed: reason withheld Nov 25 09:46:45 crc kubenswrapper[4769]: [+]process-running ok Nov 25 09:46:45 crc kubenswrapper[4769]: healthz check failed Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.959515 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zs9d4" podUID="bfcd91d2-c8b0-4648-bcc5-207b11a54ece" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 09:46:45 crc kubenswrapper[4769]: I1125 09:46:45.965817 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:45 crc kubenswrapper[4769]: E1125 09:46:45.966172 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:46.466158899 +0000 UTC m=+155.051131212 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.018540 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-tn4zj" podStartSLOduration=129.018522369 podStartE2EDuration="2m9.018522369s" podCreationTimestamp="2025-11-25 09:44:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:46.018161294 +0000 UTC m=+154.603133607" watchObservedRunningTime="2025-11-25 09:46:46.018522369 +0000 UTC m=+154.603494682" Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.049593 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.056634 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-ghgwj" podStartSLOduration=129.056599603 podStartE2EDuration="2m9.056599603s" podCreationTimestamp="2025-11-25 09:44:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:46.051227687 +0000 UTC m=+154.636200000" watchObservedRunningTime="2025-11-25 09:46:46.056599603 +0000 UTC m=+154.641571916" Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.070611 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:46:46 crc kubenswrapper[4769]: E1125 09:46:46.070889 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:46.570828746 +0000 UTC m=+155.155801069 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.071196 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:46 crc kubenswrapper[4769]: E1125 09:46:46.072521 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:46.572503084 +0000 UTC m=+155.157475397 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.085317 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-gb44m" podStartSLOduration=129.085296109 podStartE2EDuration="2m9.085296109s" podCreationTimestamp="2025-11-25 09:44:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:46.084404133 +0000 UTC m=+154.669376446" watchObservedRunningTime="2025-11-25 09:46:46.085296109 +0000 UTC m=+154.670268422" Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.172402 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:46:46 crc kubenswrapper[4769]: E1125 09:46:46.172621 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:46.672593677 +0000 UTC m=+155.257565990 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.173312 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:46 crc kubenswrapper[4769]: E1125 09:46:46.174341 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:46.674318587 +0000 UTC m=+155.259290900 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.275516 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:46:46 crc kubenswrapper[4769]: E1125 09:46:46.276028 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:46.776005824 +0000 UTC m=+155.360978137 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.307593 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-shvhw" event={"ID":"08c157ad-8d10-4284-a4e4-eb0b55a56542","Type":"ContainerStarted","Data":"f67886c5d4084b0a817f4729092cb1d0d40f8770f02639a8bcb0a5893d3c5916"} Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.317633 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-bbzl9" event={"ID":"21dad565-8414-4796-b777-e63f27b9c666","Type":"ContainerStarted","Data":"274abf9130bce1e994400be936cc1ff992fc76eb1849dfa56817acb8e9cd930e"} Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.377365 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:46 crc kubenswrapper[4769]: E1125 09:46:46.378258 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:46.877725413 +0000 UTC m=+155.462697776 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.393208 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-6dlmm" event={"ID":"8b74c1fc-0aa2-4c74-80a1-3743defdbe7a","Type":"ContainerStarted","Data":"f33e8d2344635d4d812e7a85891db7eee4d33e6d27cda474f5bf77c0968596a4"} Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.393274 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-6dlmm" event={"ID":"8b74c1fc-0aa2-4c74-80a1-3743defdbe7a","Type":"ContainerStarted","Data":"16aae2edb6e73b3d75eef10a51af724f3db313cbed939c75c097f39a1ad877d3"} Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.410051 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-cnv9g" event={"ID":"014bf10b-9df2-4cdf-b1e7-f852a56044cc","Type":"ContainerStarted","Data":"d45798a783631f0f4f62c089f2683571bc2c914a2ad58818386a219d602a5ebf"} Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.448376 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nk6zl" event={"ID":"da8fd0f6-4f81-4bd2-8c8a-3d62ac19e209","Type":"ContainerStarted","Data":"8654971f6b519577be10890ebfebc4bb55bdecfca82e1301c5704413d1642b1d"} Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.457403 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wcj44" event={"ID":"ea217be5-30fd-43fe-8901-33ea461d4f48","Type":"ContainerStarted","Data":"6a2bfa768a1577ad29c9e50ddb790fd72ff3004d270b521185806a43f40e63a6"} Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.457459 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wcj44" event={"ID":"ea217be5-30fd-43fe-8901-33ea461d4f48","Type":"ContainerStarted","Data":"22b2799090ee9537f4e01cbe97709abc2d579342f70306c9a04998cfe4ab3551"} Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.459023 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wcj44" Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.465435 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-sx97n" event={"ID":"86a2de7a-5314-47ef-b827-92050023a677","Type":"ContainerStarted","Data":"66275f690816083f07e8fe348913e44b0ff65a04cd22c49e8bf8f8f27d6a4e9f"} Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.469117 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-rdchw" event={"ID":"d9929f0c-776d-4583-94ca-bd665b5d9783","Type":"ContainerStarted","Data":"c187e12d695d96e42ebe72a492eb7ef7293e8b5cbf52078fdcf6d6e736636d13"} Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.469213 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/marketplace-operator-79b997595-rdchw" event={"ID":"d9929f0c-776d-4583-94ca-bd665b5d9783","Type":"ContainerStarted","Data":"27a615cefc6c94bcfb4b39b17b337e553b1bb6f8c8fe2c56ea60e7033286c022"} Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.470259 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-rdchw" Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.474560 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-4w9xd" event={"ID":"326e56a0-cea9-40a2-ac71-cac257ebf902","Type":"ContainerStarted","Data":"f4893f80b5b44e1e2ac016b8efcd55237a14abb773c67b1afc68b3f48ac6704b"} Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.474623 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-4w9xd" event={"ID":"326e56a0-cea9-40a2-ac71-cac257ebf902","Type":"ContainerStarted","Data":"67547b367aa76035d21dfdba65dcc74a63b8a75d30c1cedecff593931f2c0278"} Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.477336 4769 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-rdchw container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.39:8080/healthz\": dial tcp 10.217.0.39:8080: connect: connection refused" start-of-body= Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.477397 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-rdchw" podUID="d9929f0c-776d-4583-94ca-bd665b5d9783" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.39:8080/healthz\": dial tcp 10.217.0.39:8080: connect: connection refused" Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.477342 4769 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-wcj44 container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.17:8443/healthz\": dial tcp 10.217.0.17:8443: connect: connection refused" start-of-body= Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.477601 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wcj44" podUID="ea217be5-30fd-43fe-8901-33ea461d4f48" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.17:8443/healthz\": dial tcp 10.217.0.17:8443: connect: connection refused" Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.478185 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:46:46 crc kubenswrapper[4769]: E1125 09:46:46.478395 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:46.978369158 +0000 UTC m=+155.563341471 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.478642 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:46 crc kubenswrapper[4769]: E1125 09:46:46.479303 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:46.979294216 +0000 UTC m=+155.564266529 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.489053 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t7rc7" event={"ID":"afd3a02c-c392-4d05-8152-009157a52bf2","Type":"ContainerStarted","Data":"95b67a17118ff861b98c6783ac07e8f9051969334acf24682cd6374847e675f2"} Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.497517 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-g58mb" event={"ID":"66cdfd99-a795-4a7d-bdc4-fdb11a2bba75","Type":"ContainerStarted","Data":"42956546cf96245c8a6165b432a42f6d6e40d199e945c11634b74b703e18f6b9"} Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.581940 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8ft75" event={"ID":"d1f7b13d-54c0-4876-bccc-723388dee681","Type":"ContainerStarted","Data":"cd7813f6c1dbebcff19d296f0216ea175003762f73d39d147118a9e96fc62713"} Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.582024 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8ft75" event={"ID":"d1f7b13d-54c0-4876-bccc-723388dee681","Type":"ContainerStarted","Data":"4bb98356d66101bc948241f1c89ebee76db0c94547ad245f123031d5293637bb"} Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.582539 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:46:46 crc kubenswrapper[4769]: E1125 
09:46:46.582900 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:47.082826468 +0000 UTC m=+155.667798781 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.605821 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" event={"ID":"00b22ed8-e9b5-4173-8574-b06254cd0965","Type":"ContainerStarted","Data":"0fb4947d310348df51fde3601fe93fb61d2f6369e11caeff3ce7bcf87df2667b"} Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.610068 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.620001 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-w55h7" event={"ID":"b36eb87e-d18f-43f7-bd81-ffd76a8975d1","Type":"ContainerStarted","Data":"a710453213383dfb2d549281ff33c33683ea8d0f31d8cd9a4f6b82b283eb76a3"} Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.629007 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w82wc" event={"ID":"3ad7cc35-130e-4404-8a4f-173884ab4e41","Type":"ContainerStarted","Data":"08668eb17d6f98319e4217ffb5e687f315f1ad46699f4ac1196036fa9de2f1ec"} Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.634793 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-s289l" event={"ID":"99e5c770-c7b9-496d-93c7-bd8151e1bd49","Type":"ContainerStarted","Data":"1cf14a5f5e98141205db584642a203384f0c22349077890bb8edd91575e5f768"} Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.654767 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-p57pg" event={"ID":"6c5b8b29-e065-4587-88d6-cd93b56a1657","Type":"ContainerStarted","Data":"a63bd663e23c4a517b88338817ec9dad5268cf89b34f0a7068fb02f1847f2b51"} Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.669423 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-skhr4" event={"ID":"c6742b80-a34d-4998-832f-29c88178b4ba","Type":"ContainerStarted","Data":"9e36d43174f042a1e81787bad9de6f8cd4021648d1d180e4577d27a0aba67e52"} Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.690482 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:46 crc kubenswrapper[4769]: E1125 09:46:46.692754 4769 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:47.192732776 +0000 UTC m=+155.777705079 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.701925 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-qthpd" event={"ID":"4ed994f7-d70c-4220-a142-f23cb0550377","Type":"ContainerStarted","Data":"07dc518e9f284e259af527df458a40f74018401b192599bf0fe44c13f67e644a"} Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.730217 4769 generic.go:334] "Generic (PLEG): container finished" podID="d73449ca-6268-455c-a1b5-79e0c6c79779" containerID="6bd72e0511d734be783f5e28a1ed84a7697a7f172d1dd2e8e3baa137f2897653" exitCode=0 Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.730355 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-bd82s" event={"ID":"d73449ca-6268-455c-a1b5-79e0c6c79779","Type":"ContainerDied","Data":"6bd72e0511d734be783f5e28a1ed84a7697a7f172d1dd2e8e3baa137f2897653"} Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.756666 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-v4q28" event={"ID":"0d5eb36d-4ded-4f1f-916d-140c5f814e27","Type":"ContainerStarted","Data":"3ed3a5f24fcb1a18673dac0499f13c02d13161cc7039e47d0b63798141253535"} Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.756740 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-v4q28" event={"ID":"0d5eb36d-4ded-4f1f-916d-140c5f814e27","Type":"ContainerStarted","Data":"0fce1fd8f3e6279f1936a92a79c65c817401fa697b8f4ea55695a0cfd365c4c8"} Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.795923 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-zjqhj" event={"ID":"446806b9-7941-43d7-885c-61c1d577811d","Type":"ContainerStarted","Data":"b29ee58bf1fb60c52b2edbfbda8e315e56d6f626600bccfe4f17d018dcbe8603"} Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.796002 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-zjqhj" event={"ID":"446806b9-7941-43d7-885c-61c1d577811d","Type":"ContainerStarted","Data":"ac429310bee3eec5cf21a61ea1b7416c985b29d6fa94570cf51edbd939ff3257"} Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.796762 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-zjqhj" Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.797169 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:46:46 crc kubenswrapper[4769]: E1125 09:46:46.797294 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:47.297260238 +0000 UTC m=+155.882232551 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.797437 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:46 crc kubenswrapper[4769]: E1125 09:46:46.799725 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:47.299709327 +0000 UTC m=+155.884681640 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.808198 4769 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-zjqhj container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.22:8443/healthz\": dial tcp 10.217.0.22:8443: connect: connection refused" start-of-body= Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.808277 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-zjqhj" podUID="446806b9-7941-43d7-885c-61c1d577811d" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.22:8443/healthz\": dial tcp 10.217.0.22:8443: connect: connection refused" Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.824403 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-qz6sr" event={"ID":"c3329bde-d97b-47a5-96dc-a033e3c4bc8c","Type":"ContainerStarted","Data":"fc24b33305f0ec2b5fcc8320e0d29af720bb4cc36c67c54c96f876c649e2251b"} Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.824463 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-qz6sr" event={"ID":"c3329bde-d97b-47a5-96dc-a033e3c4bc8c","Type":"ContainerStarted","Data":"15db6dc39ffdc78a6a927d53ef1e24ab975efaa55c2f2d16f9e8d9b41e677a19"} Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.828424 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-mvk5j" event={"ID":"e483c02c-6538-4d83-89d0-8c474de6c1cb","Type":"ContainerStarted","Data":"34b6d4083b0b7d2347256792b230b895951a5d9a00dcfc1b619bf1623e711159"} Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.860753 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d7jmg" event={"ID":"0a7bb2e3-eb5e-43a5-88f5-df93dff3afce","Type":"ContainerStarted","Data":"b1f57b2d40f628dc118a400dc1cd8bbe39642f32869de8c7e665f62b61ebbbb3"} Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.860818 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d7jmg" event={"ID":"0a7bb2e3-eb5e-43a5-88f5-df93dff3afce","Type":"ContainerStarted","Data":"6866825894bd33c980220b127b4e0503be9fda2d8e4eb2a6906515167984b6df"} Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.861281 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d7jmg" Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.865907 4769 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-d7jmg container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.36:5443/healthz\": dial tcp 10.217.0.36:5443: connect: connection refused" start-of-body= Nov 25 09:46:46 crc 
kubenswrapper[4769]: I1125 09:46:46.865955 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d7jmg" podUID="0a7bb2e3-eb5e-43a5-88f5-df93dff3afce" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.36:5443/healthz\": dial tcp 10.217.0.36:5443: connect: connection refused" Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.875731 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-wvj7p" event={"ID":"c7d0c499-f7f4-422c-9979-652cbf8c86bf","Type":"ContainerStarted","Data":"e82d2eeaf3c54a3f053204aaf64c3b3422acebb8a1eb253dca8d88541ee22d55"} Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.881169 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-blk8z" event={"ID":"0183ff9c-f592-4fa2-9775-9385cf6c9499","Type":"ContainerStarted","Data":"3709434e741078198cf64b065c1746421e80778c157d54807540617a98eab6e4"} Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.898706 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:46:46 crc kubenswrapper[4769]: E1125 09:46:46.900395 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:47.400346082 +0000 UTC m=+155.985318395 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.945223 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-vp9pc" event={"ID":"8775d5bd-5cae-49f4-9582-a541dfd05c15","Type":"ContainerStarted","Data":"7392bec0cb05c47237db433271c586a87617ca3f7dfaa5b5774e1572c70764cd"} Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.945596 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-vp9pc" event={"ID":"8775d5bd-5cae-49f4-9582-a541dfd05c15","Type":"ContainerStarted","Data":"358d4b08089379f7e3a3b48b6b150a374b1cb7fa5fff1ec5469f9585167cc0ce"} Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.954776 4769 patch_prober.go:28] interesting pod/router-default-5444994796-zs9d4 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 09:46:46 crc kubenswrapper[4769]: [-]has-synced failed: reason withheld Nov 25 09:46:46 crc kubenswrapper[4769]: [+]process-running ok Nov 25 09:46:46 crc kubenswrapper[4769]: healthz check failed Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.954851 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zs9d4" podUID="bfcd91d2-c8b0-4648-bcc5-207b11a54ece" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.957571 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-twb6l" event={"ID":"5351ce01-8233-4a92-b15d-befd4e57b0d2","Type":"ContainerStarted","Data":"35cac20f8f637e17bca7d0684594ee9f158bdfd2f5fc1c45a00d097955e8ade9"} Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.957719 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-twb6l" event={"ID":"5351ce01-8233-4a92-b15d-befd4e57b0d2","Type":"ContainerStarted","Data":"0d528921e5c0e6b8ce538673032baf8a4107ec30df054535d1921231b1ed353d"} Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.974679 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-cv8sw" event={"ID":"162f7ecc-6c53-4f08-955e-3ad0e5a7186e","Type":"ContainerStarted","Data":"75e9071f4def69a4a97dba4fff22992c688707c8e7b0ef6d0474b82e2ecea9ce"} Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.974743 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-cv8sw" event={"ID":"162f7ecc-6c53-4f08-955e-3ad0e5a7186e","Type":"ContainerStarted","Data":"aac22c91662c7da6d2302a4f752c7333a805bd4ef7791ad2d39e16f5115a95da"} Nov 25 09:46:46 crc kubenswrapper[4769]: I1125 09:46:46.975840 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-cv8sw" Nov 25 09:46:47 crc kubenswrapper[4769]: I1125 09:46:47.002118 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:47 crc kubenswrapper[4769]: E1125 09:46:47.002884 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:47.502863173 +0000 UTC m=+156.087835486 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:47 crc kubenswrapper[4769]: I1125 09:46:47.003663 4769 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-cv8sw container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.18:8443/healthz\": dial tcp 10.217.0.18:8443: connect: connection refused" start-of-body= Nov 25 09:46:47 crc kubenswrapper[4769]: I1125 09:46:47.003726 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-cv8sw" podUID="162f7ecc-6c53-4f08-955e-3ad0e5a7186e" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.18:8443/healthz\": dial tcp 10.217.0.18:8443: connect: connection refused" Nov 25 09:46:47 crc kubenswrapper[4769]: I1125 09:46:47.012363 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-f8mb8" event={"ID":"72c6ce4d-c36a-4f0e-98d5-217e98231c9f","Type":"ContainerStarted","Data":"e2376b3371b4403cf5e91f37113ab61a625942ab29327c6590c130cccce23bef"} Nov 25 09:46:47 crc kubenswrapper[4769]: I1125 09:46:47.021589 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-hn7p6" event={"ID":"d8add56f-d9a7-4a9a-8f68-b7900166c27f","Type":"ContainerStarted","Data":"3c4c2bc0b148bccbeb93084b7573fa0cb6303014b94f7d83513782a5a1a1149c"} Nov 25 09:46:47 crc kubenswrapper[4769]: I1125 09:46:47.038262 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-82vd5" event={"ID":"1f4b3ab4-5512-4d2e-ac26-020b304c7cb4","Type":"ContainerStarted","Data":"25a92eba3dc065a19713206876079749e343226c7e060dc1c6d9a2db3122974e"} Nov 25 09:46:47 crc kubenswrapper[4769]: I1125 09:46:47.038406 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-l2kg2" Nov 25 09:46:47 crc kubenswrapper[4769]: I1125 09:46:47.048759 4769 patch_prober.go:28] interesting pod/downloads-7954f5f757-l2kg2 container/download-server namespace/openshift-console: Readiness probe 
status=failure output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body= Nov 25 09:46:47 crc kubenswrapper[4769]: I1125 09:46:47.048836 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-l2kg2" podUID="01b73250-0ba8-4c2d-9ada-dacc89a70a7d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" Nov 25 09:46:47 crc kubenswrapper[4769]: I1125 09:46:47.059518 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-xg47p" Nov 25 09:46:47 crc kubenswrapper[4769]: I1125 09:46:47.105894 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:46:47 crc kubenswrapper[4769]: E1125 09:46:47.107529 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:47.607495329 +0000 UTC m=+156.192467642 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:47 crc kubenswrapper[4769]: I1125 09:46:47.192083 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-vp9pc" podStartSLOduration=130.192065437 podStartE2EDuration="2m10.192065437s" podCreationTimestamp="2025-11-25 09:44:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:47.19139588 +0000 UTC m=+155.776368203" watchObservedRunningTime="2025-11-25 09:46:47.192065437 +0000 UTC m=+155.777037750" Nov 25 09:46:47 crc kubenswrapper[4769]: I1125 09:46:47.228922 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:47 crc kubenswrapper[4769]: E1125 09:46:47.229346 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:47.729330909 +0000 UTC m=+156.314303222 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:47 crc kubenswrapper[4769]: I1125 09:46:47.301264 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-rdchw" podStartSLOduration=130.301229686 podStartE2EDuration="2m10.301229686s" podCreationTimestamp="2025-11-25 09:44:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:47.266246756 +0000 UTC m=+155.851219069" watchObservedRunningTime="2025-11-25 09:46:47.301229686 +0000 UTC m=+155.886201999" Nov 25 09:46:47 crc kubenswrapper[4769]: I1125 09:46:47.330906 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:46:47 crc kubenswrapper[4769]: E1125 09:46:47.331681 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:47.831648122 +0000 UTC m=+156.416620435 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:47 crc kubenswrapper[4769]: I1125 09:46:47.345763 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-mvk5j" podStartSLOduration=130.345650796 podStartE2EDuration="2m10.345650796s" podCreationTimestamp="2025-11-25 09:44:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:47.33458206 +0000 UTC m=+155.919554373" watchObservedRunningTime="2025-11-25 09:46:47.345650796 +0000 UTC m=+155.930623119" Nov 25 09:46:47 crc kubenswrapper[4769]: I1125 09:46:47.400545 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-qz6sr" podStartSLOduration=130.400523157 podStartE2EDuration="2m10.400523157s" podCreationTimestamp="2025-11-25 09:44:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:47.399689594 +0000 UTC m=+155.984661907" watchObservedRunningTime="2025-11-25 09:46:47.400523157 +0000 UTC m=+155.985495460" Nov 25 09:46:47 crc kubenswrapper[4769]: I1125 09:46:47.427819 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" Nov 25 09:46:47 crc kubenswrapper[4769]: I1125 09:46:47.439118 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:47 crc kubenswrapper[4769]: E1125 09:46:47.439496 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:47.939482497 +0000 UTC m=+156.524454810 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:47 crc kubenswrapper[4769]: I1125 09:46:47.541831 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:46:47 crc kubenswrapper[4769]: E1125 09:46:47.542382 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:48.042362263 +0000 UTC m=+156.627334576 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:47 crc kubenswrapper[4769]: I1125 09:46:47.542796 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:47 crc kubenswrapper[4769]: E1125 09:46:47.543211 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:48.043201216 +0000 UTC m=+156.628173529 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:47 crc kubenswrapper[4769]: I1125 09:46:47.606617 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-qthpd" podStartSLOduration=130.606592831 podStartE2EDuration="2m10.606592831s" podCreationTimestamp="2025-11-25 09:44:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:47.606302589 +0000 UTC m=+156.191274902" watchObservedRunningTime="2025-11-25 09:46:47.606592831 +0000 UTC m=+156.191565154" Nov 25 09:46:47 crc kubenswrapper[4769]: I1125 09:46:47.611034 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" podStartSLOduration=130.611007929 podStartE2EDuration="2m10.611007929s" podCreationTimestamp="2025-11-25 09:44:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:47.563323467 +0000 UTC m=+156.148295780" watchObservedRunningTime="2025-11-25 09:46:47.611007929 +0000 UTC m=+156.195980252" Nov 25 09:46:47 crc kubenswrapper[4769]: I1125 09:46:47.644133 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:46:47 crc kubenswrapper[4769]: E1125 09:46:47.644579 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:48.144561271 +0000 UTC m=+156.729533574 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:47 crc kubenswrapper[4769]: I1125 09:46:47.662526 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d7jmg" podStartSLOduration=129.662501454 podStartE2EDuration="2m9.662501454s" podCreationTimestamp="2025-11-25 09:44:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:47.658895919 +0000 UTC m=+156.243868232" watchObservedRunningTime="2025-11-25 09:46:47.662501454 +0000 UTC m=+156.247473767" Nov 25 09:46:47 crc kubenswrapper[4769]: I1125 09:46:47.712589 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-p57pg" podStartSLOduration=130.712561221 podStartE2EDuration="2m10.712561221s" podCreationTimestamp="2025-11-25 09:44:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:47.704954005 +0000 UTC m=+156.289926308" watchObservedRunningTime="2025-11-25 09:46:47.712561221 +0000 UTC m=+156.297533534" Nov 25 09:46:47 crc kubenswrapper[4769]: I1125 09:46:47.746023 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:47 crc kubenswrapper[4769]: E1125 09:46:47.746467 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:48.246452747 +0000 UTC m=+156.831425060 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:47 crc kubenswrapper[4769]: I1125 09:46:47.792264 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-6dlmm" podStartSLOduration=130.792235272 podStartE2EDuration="2m10.792235272s" podCreationTimestamp="2025-11-25 09:44:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:47.791219411 +0000 UTC m=+156.376191724" watchObservedRunningTime="2025-11-25 09:46:47.792235272 +0000 UTC m=+156.377207585" Nov 25 09:46:47 crc kubenswrapper[4769]: I1125 09:46:47.846940 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:46:47 crc kubenswrapper[4769]: E1125 09:46:47.847331 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:48.347312621 +0000 UTC m=+156.932284934 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:47 crc kubenswrapper[4769]: I1125 09:46:47.910315 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-zjqhj" podStartSLOduration=130.910291959 podStartE2EDuration="2m10.910291959s" podCreationTimestamp="2025-11-25 09:44:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:47.837026796 +0000 UTC m=+156.421999119" watchObservedRunningTime="2025-11-25 09:46:47.910291959 +0000 UTC m=+156.495264272" Nov 25 09:46:47 crc kubenswrapper[4769]: I1125 09:46:47.911755 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-cv8sw" podStartSLOduration=129.911745067 podStartE2EDuration="2m9.911745067s" podCreationTimestamp="2025-11-25 09:44:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:47.91130865 +0000 UTC m=+156.496280963" watchObservedRunningTime="2025-11-25 09:46:47.911745067 +0000 UTC m=+156.496717380" Nov 25 09:46:47 crc kubenswrapper[4769]: I1125 09:46:47.949005 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:47 crc kubenswrapper[4769]: E1125 09:46:47.949415 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:48.449402045 +0000 UTC m=+157.034374358 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:47 crc kubenswrapper[4769]: I1125 09:46:47.966226 4769 patch_prober.go:28] interesting pod/router-default-5444994796-zs9d4 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 09:46:47 crc kubenswrapper[4769]: [-]has-synced failed: reason withheld Nov 25 09:46:47 crc kubenswrapper[4769]: [+]process-running ok Nov 25 09:46:47 crc kubenswrapper[4769]: healthz check failed Nov 25 09:46:47 crc kubenswrapper[4769]: I1125 09:46:47.966301 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zs9d4" podUID="bfcd91d2-c8b0-4648-bcc5-207b11a54ece" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 09:46:47 crc kubenswrapper[4769]: I1125 09:46:47.988414 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-l2kg2" podStartSLOduration=130.988394836 podStartE2EDuration="2m10.988394836s" podCreationTimestamp="2025-11-25 09:44:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:47.987521861 +0000 UTC m=+156.572494174" watchObservedRunningTime="2025-11-25 09:46:47.988394836 +0000 UTC m=+156.573367149" Nov 25 09:46:47 crc kubenswrapper[4769]: I1125 09:46:47.989932 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wcj44" podStartSLOduration=129.989924177 podStartE2EDuration="2m9.989924177s" podCreationTimestamp="2025-11-25 09:44:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:47.945350041 +0000 UTC m=+156.530322354" watchObservedRunningTime="2025-11-25 09:46:47.989924177 +0000 UTC m=+156.574896490" Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.051620 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:46:48 crc kubenswrapper[4769]: E1125 09:46:48.052566 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:48.552542601 +0000 UTC m=+157.137514914 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.064680 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t7rc7" podStartSLOduration=131.064659199 podStartE2EDuration="2m11.064659199s" podCreationTimestamp="2025-11-25 09:44:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:48.015857632 +0000 UTC m=+156.600829955" watchObservedRunningTime="2025-11-25 09:46:48.064659199 +0000 UTC m=+156.649631502" Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.080673 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-s289l" event={"ID":"99e5c770-c7b9-496d-93c7-bd8151e1bd49","Type":"ContainerStarted","Data":"1166ad683f85c2f9995c9a44efae0843f88896aec48f83a0b0037cf5c04f53a1"} Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.116823 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-mvk5j" event={"ID":"e483c02c-6538-4d83-89d0-8c474de6c1cb","Type":"ContainerStarted","Data":"196ec03105dfdcd659875465620852d310c7c4b64180f59f076087793acb5dfa"} Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.150730 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-bd82s" event={"ID":"d73449ca-6268-455c-a1b5-79e0c6c79779","Type":"ContainerStarted","Data":"4cca5f879914f7a05056d49c7be25bd3e60f53d3bfea4146492f66bb93f56813"} Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.151045 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-bd82s" Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.153645 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:48 crc kubenswrapper[4769]: E1125 09:46:48.154068 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:48.654053541 +0000 UTC m=+157.239025854 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.172792 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-f8mb8" event={"ID":"72c6ce4d-c36a-4f0e-98d5-217e98231c9f","Type":"ContainerStarted","Data":"b9ba7d9910008b8db400c00289d75921cef869221231ac0d21c0d4a81869a4af"} Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.199243 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-bd82s" podStartSLOduration=131.19918442 podStartE2EDuration="2m11.19918442s" podCreationTimestamp="2025-11-25 09:44:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:48.186817581 +0000 UTC m=+156.771789894" watchObservedRunningTime="2025-11-25 09:46:48.19918442 +0000 UTC m=+156.784156733" Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.210355 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-blk8z" event={"ID":"0183ff9c-f592-4fa2-9775-9385cf6c9499","Type":"ContainerStarted","Data":"e7be6949baeb4b75ebd74e58e55340dbd60cf568cd4685aa5a8cada0119dc5b6"} Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.258614 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:46:48 crc kubenswrapper[4769]: E1125 09:46:48.260125 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:48.760099254 +0000 UTC m=+157.345071567 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.299814 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-f8mb8" podStartSLOduration=131.299789384 podStartE2EDuration="2m11.299789384s" podCreationTimestamp="2025-11-25 09:44:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:48.234197561 +0000 UTC m=+156.819169884" watchObservedRunningTime="2025-11-25 09:46:48.299789384 +0000 UTC m=+156.884761697" Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.301424 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-blk8z" podStartSLOduration=7.301416049 podStartE2EDuration="7.301416049s" podCreationTimestamp="2025-11-25 09:46:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:48.298633057 +0000 UTC m=+156.883605370" watchObservedRunningTime="2025-11-25 09:46:48.301416049 +0000 UTC m=+156.886388362" Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.320792 4769 generic.go:334] "Generic (PLEG): container finished" podID="3ad7cc35-130e-4404-8a4f-173884ab4e41" containerID="594f4c9d7a2016e9ae99bfe4682670c40e8aaf901674d6e5ff235da2a375afc5" exitCode=0 Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.361261 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:48 crc kubenswrapper[4769]: E1125 09:46:48.362504 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:48.86248858 +0000 UTC m=+157.447460893 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.429233 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-bbzl9" podStartSLOduration=108.429216679 podStartE2EDuration="1m48.429216679s" podCreationTimestamp="2025-11-25 09:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:48.427664477 +0000 UTC m=+157.012636790" watchObservedRunningTime="2025-11-25 09:46:48.429216679 +0000 UTC m=+157.014188982" Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.430326 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-v4q28" podStartSLOduration=131.430320964 podStartE2EDuration="2m11.430320964s" podCreationTimestamp="2025-11-25 09:44:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:48.326516111 +0000 UTC m=+156.911488434" watchObservedRunningTime="2025-11-25 09:46:48.430320964 +0000 UTC m=+157.015293277" Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.469992 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-g58mb" podStartSLOduration=130.46994657 podStartE2EDuration="2m10.46994657s" podCreationTimestamp="2025-11-25 09:44:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:48.467636317 +0000 UTC m=+157.052608630" watchObservedRunningTime="2025-11-25 09:46:48.46994657 +0000 UTC m=+157.054918883" Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.473903 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:46:48 crc kubenswrapper[4769]: E1125 09:46:48.475301 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:48.975279555 +0000 UTC m=+157.560251868 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.525454 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8ft75" podStartSLOduration=131.525435676 podStartE2EDuration="2m11.525435676s" podCreationTimestamp="2025-11-25 09:44:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:48.52427794 +0000 UTC m=+157.109250253" watchObservedRunningTime="2025-11-25 09:46:48.525435676 +0000 UTC m=+157.110407989" Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.579707 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:48 crc kubenswrapper[4769]: E1125 09:46:48.580298 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:49.080276826 +0000 UTC m=+157.665249139 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.621579 4769 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-rdchw container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.39:8080/healthz\": dial tcp 10.217.0.39:8080: connect: connection refused" start-of-body= Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.621621 4769 patch_prober.go:28] interesting pod/downloads-7954f5f757-l2kg2 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body= Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.621634 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-rdchw" podUID="d9929f0c-776d-4583-94ca-bd665b5d9783" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.39:8080/healthz\": dial tcp 10.217.0.39:8080: connect: connection refused" Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.621684 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-l2kg2" podUID="01b73250-0ba8-4c2d-9ada-dacc89a70a7d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.687880 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:46:48 crc kubenswrapper[4769]: E1125 09:46:48.689037 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:49.189015958 +0000 UTC m=+157.773988271 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.711568 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-w55h7" podStartSLOduration=130.711545916 podStartE2EDuration="2m10.711545916s" podCreationTimestamp="2025-11-25 09:44:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:48.629873635 +0000 UTC m=+157.214845948" watchObservedRunningTime="2025-11-25 09:46:48.711545916 +0000 UTC m=+157.296518219" Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.801047 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.801580 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-wvj7p" podStartSLOduration=130.801552423 podStartE2EDuration="2m10.801552423s" podCreationTimestamp="2025-11-25 09:44:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:48.799520981 +0000 UTC m=+157.384493304" watchObservedRunningTime="2025-11-25 09:46:48.801552423 +0000 UTC m=+157.386524736" Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.802649 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-twb6l" podStartSLOduration=131.802642377 podStartE2EDuration="2m11.802642377s" podCreationTimestamp="2025-11-25 09:44:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:48.712984044 +0000 UTC m=+157.297956357" watchObservedRunningTime="2025-11-25 09:46:48.802642377 +0000 UTC m=+157.387614690" Nov 25 09:46:48 crc kubenswrapper[4769]: E1125 09:46:48.810806 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:49.310784815 +0000 UTC m=+157.895757128 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.902475 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:46:48 crc kubenswrapper[4769]: E1125 09:46:48.902870 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:49.402851314 +0000 UTC m=+157.987823627 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.911172 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-w55h7" Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.911216 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-v4q28" event={"ID":"0d5eb36d-4ded-4f1f-916d-140c5f814e27","Type":"ContainerStarted","Data":"7382b6a287ae89165b3bb5f09086745d591e363b723f8fc4f58686427ccc8540"} Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.911243 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-bbzl9" event={"ID":"21dad565-8414-4796-b777-e63f27b9c666","Type":"ContainerStarted","Data":"039423952c8f0d9590aaeeab3b5c963aa92c26970c18c88a2a596c36db833521"} Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.911256 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-shvhw" Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.911270 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-g58mb" event={"ID":"66cdfd99-a795-4a7d-bdc4-fdb11a2bba75","Type":"ContainerStarted","Data":"640e622ae8aca02109aae209eae986b3764f870b5b005ec7f8a4d1250eedaf63"} Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.911280 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-w55h7" event={"ID":"b36eb87e-d18f-43f7-bd81-ffd76a8975d1","Type":"ContainerStarted","Data":"3cd7f68d517cc18b829538174de8e38f09826d9dc80f58fd8a728e87e833e473"} Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 
09:46:48.911296 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-cnv9g" Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.911305 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-w55h7" event={"ID":"b36eb87e-d18f-43f7-bd81-ffd76a8975d1","Type":"ContainerStarted","Data":"5b877be1fea2f54f42770053fddff7b2cefaf00c8defe6ff98e8de27c16a00cb"} Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.911317 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w82wc" event={"ID":"3ad7cc35-130e-4404-8a4f-173884ab4e41","Type":"ContainerDied","Data":"594f4c9d7a2016e9ae99bfe4682670c40e8aaf901674d6e5ff235da2a375afc5"} Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.911334 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-shvhw" Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.911347 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-twb6l" event={"ID":"5351ce01-8233-4a92-b15d-befd4e57b0d2","Type":"ContainerStarted","Data":"184cf666054f75442dc4541bdbd21a81272926afc26683fa4f1d3a94601f5116"} Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.911364 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-skhr4" event={"ID":"c6742b80-a34d-4998-832f-29c88178b4ba","Type":"ContainerStarted","Data":"02ca0cb2f8887335b4e07693096b74e90e224a63f0eb1dcaa3e6e0927df42285"} Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.911404 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-cnv9g" Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.911432 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-wcj44" Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.911463 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-zjqhj" Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.911482 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-shvhw" event={"ID":"08c157ad-8d10-4284-a4e4-eb0b55a56542","Type":"ContainerStarted","Data":"c15a2904e9424e9d88e9d7a14f870cac349480348687ffe2d940d2dca0647b23"} Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.911501 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-wvj7p" event={"ID":"c7d0c499-f7f4-422c-9979-652cbf8c86bf","Type":"ContainerStarted","Data":"f9292dbfc93b22f92800b687fff3f71bd0a12ac4e1ff639ccf922f8d9f255193"} Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.911512 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-8ft75" event={"ID":"d1f7b13d-54c0-4876-bccc-723388dee681","Type":"ContainerStarted","Data":"8df8c5e3a61499850a3aa130b76c040864e66e87fd1b377f90d41982ef14b029"} Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.911536 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-cv8sw" Nov 25 
09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.911548 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-qthpd" event={"ID":"4ed994f7-d70c-4220-a142-f23cb0550377","Type":"ContainerStarted","Data":"e98bd04eecf5439d20276753ab801b5c79db6d975130b44ae27cab5986f43784"} Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.911559 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-82vd5" event={"ID":"1f4b3ab4-5512-4d2e-ac26-020b304c7cb4","Type":"ContainerStarted","Data":"856055a30affbd333f03ff1bff186fe8a59583d400b086d8da0afc3f5e6826ed"} Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.911571 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-cnv9g" event={"ID":"014bf10b-9df2-4cdf-b1e7-f852a56044cc","Type":"ContainerStarted","Data":"c90575cfa3ca8cc2bad5faf6fc015eee37804bc606042dbf0c20fea811dbae09"} Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.911580 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nk6zl" event={"ID":"da8fd0f6-4f81-4bd2-8c8a-3d62ac19e209","Type":"ContainerStarted","Data":"a3ad135aade0bd5e4f34b559e974fb2267e64e44c69584fa1f090199a56547bb"} Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.911592 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-4w9xd" event={"ID":"326e56a0-cea9-40a2-ac71-cac257ebf902","Type":"ContainerStarted","Data":"565084ba393579954c84579339984dc9a286ee230299dce8264cbe771b72a55f"} Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.911602 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-hn7p6" event={"ID":"d8add56f-d9a7-4a9a-8f68-b7900166c27f","Type":"ContainerStarted","Data":"a8981004f12f25d3dcee069e00e3df64bf7964cc92d5ca150571f0bc29c1d706"} Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.949629 4769 patch_prober.go:28] interesting pod/router-default-5444994796-zs9d4 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 09:46:48 crc kubenswrapper[4769]: [-]has-synced failed: reason withheld Nov 25 09:46:48 crc kubenswrapper[4769]: [+]process-running ok Nov 25 09:46:48 crc kubenswrapper[4769]: healthz check failed Nov 25 09:46:48 crc kubenswrapper[4769]: I1125 09:46:48.949741 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zs9d4" podUID="bfcd91d2-c8b0-4648-bcc5-207b11a54ece" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.002687 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-shvhw" podStartSLOduration=132.002661406 podStartE2EDuration="2m12.002661406s" podCreationTimestamp="2025-11-25 09:44:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:48.953976235 +0000 UTC m=+157.538948558" watchObservedRunningTime="2025-11-25 09:46:49.002661406 +0000 UTC m=+157.587633719" Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.003829 4769 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:49 crc kubenswrapper[4769]: E1125 09:46:49.004230 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:49.504215129 +0000 UTC m=+158.089187442 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.106239 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:46:49 crc kubenswrapper[4769]: E1125 09:46:49.106882 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:49.606852005 +0000 UTC m=+158.191824318 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.129583 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nk6zl" podStartSLOduration=132.129351411 podStartE2EDuration="2m12.129351411s" podCreationTimestamp="2025-11-25 09:44:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:49.002598464 +0000 UTC m=+157.587570777" watchObservedRunningTime="2025-11-25 09:46:49.129351411 +0000 UTC m=+157.714323724" Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.130479 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-4w9xd" podStartSLOduration=132.130411894 podStartE2EDuration="2m12.130411894s" podCreationTimestamp="2025-11-25 09:44:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:49.098882034 +0000 UTC m=+157.683854357" watchObservedRunningTime="2025-11-25 09:46:49.130411894 +0000 UTC m=+157.715384197" Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.208219 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:49 crc kubenswrapper[4769]: E1125 09:46:49.209063 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:49.709047002 +0000 UTC m=+158.294019315 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.274636 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-cnv9g" podStartSLOduration=131.274603833 podStartE2EDuration="2m11.274603833s" podCreationTimestamp="2025-11-25 09:44:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:49.242526621 +0000 UTC m=+157.827498934" watchObservedRunningTime="2025-11-25 09:46:49.274603833 +0000 UTC m=+157.859576146" Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.275832 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-c4dbm"] Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.276931 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c4dbm" Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.292746 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.300918 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-82vd5" podStartSLOduration=132.300893423 podStartE2EDuration="2m12.300893423s" podCreationTimestamp="2025-11-25 09:44:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:49.296025467 +0000 UTC m=+157.880997780" watchObservedRunningTime="2025-11-25 09:46:49.300893423 +0000 UTC m=+157.885865736" Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.301030 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-c4dbm"] Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.309793 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:46:49 crc kubenswrapper[4769]: E1125 09:46:49.310303 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:49.810285211 +0000 UTC m=+158.395257524 (durationBeforeRetry 500ms). 
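Annotation: each failure is immediately followed by "No retries permitted until <t> (durationBeforeRetry 500ms)" from nestedpendingoperations.go, which gates retries per volume so a failing operation is only re-attempted after the backoff window elapses. A sketch of that gating under assumed names (backoffGate and tryRun are illustrative, with the fixed 500ms delay taken from the log):

// Minimal sketch of the per-volume retry gating visible in the
// nestedpendingoperations lines above.
package main

import (
	"fmt"
	"time"
)

type backoffGate struct {
	notBefore map[string]time.Time
	delay     time.Duration
}

func (g *backoffGate) tryRun(key string, op func() error) error {
	if t, ok := g.notBefore[key]; ok && time.Now().Before(t) {
		return fmt.Errorf("no retries permitted until %s", t.Format(time.RFC3339Nano))
	}
	if err := op(); err != nil {
		g.notBefore[key] = time.Now().Add(g.delay) // gate the next attempt
		return err
	}
	delete(g.notBefore, key) // success clears the gate
	return nil
}

func main() {
	g := &backoffGate{notBefore: map[string]time.Time{}, delay: 500 * time.Millisecond}
	vol := "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8"
	_ = g.tryRun(vol, func() error { return fmt.Errorf("driver not registered") })
	// An immediate retry is refused until the 500ms window passes:
	fmt.Println(g.tryRun(vol, func() error { return nil }))
	time.Sleep(600 * time.Millisecond)
	fmt.Println(g.tryRun(vol, func() error { return nil })) // <nil>
}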
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.415111 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.415200 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b8cf525e-b1f8-447d-bde3-7a8746836e39-catalog-content\") pod \"certified-operators-c4dbm\" (UID: \"b8cf525e-b1f8-447d-bde3-7a8746836e39\") " pod="openshift-marketplace/certified-operators-c4dbm" Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.415221 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qtmmr\" (UniqueName: \"kubernetes.io/projected/b8cf525e-b1f8-447d-bde3-7a8746836e39-kube-api-access-qtmmr\") pod \"certified-operators-c4dbm\" (UID: \"b8cf525e-b1f8-447d-bde3-7a8746836e39\") " pod="openshift-marketplace/certified-operators-c4dbm" Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.415252 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b8cf525e-b1f8-447d-bde3-7a8746836e39-utilities\") pod \"certified-operators-c4dbm\" (UID: \"b8cf525e-b1f8-447d-bde3-7a8746836e39\") " pod="openshift-marketplace/certified-operators-c4dbm" Nov 25 09:46:49 crc kubenswrapper[4769]: E1125 09:46:49.415679 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:49.915662458 +0000 UTC m=+158.500634771 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.499540 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-7m4kh"] Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.500667 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-7m4kh" Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.509516 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.519752 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.520046 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b8cf525e-b1f8-447d-bde3-7a8746836e39-catalog-content\") pod \"certified-operators-c4dbm\" (UID: \"b8cf525e-b1f8-447d-bde3-7a8746836e39\") " pod="openshift-marketplace/certified-operators-c4dbm" Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.520078 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qtmmr\" (UniqueName: \"kubernetes.io/projected/b8cf525e-b1f8-447d-bde3-7a8746836e39-kube-api-access-qtmmr\") pod \"certified-operators-c4dbm\" (UID: \"b8cf525e-b1f8-447d-bde3-7a8746836e39\") " pod="openshift-marketplace/certified-operators-c4dbm" Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.520113 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b8cf525e-b1f8-447d-bde3-7a8746836e39-utilities\") pod \"certified-operators-c4dbm\" (UID: \"b8cf525e-b1f8-447d-bde3-7a8746836e39\") " pod="openshift-marketplace/certified-operators-c4dbm" Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.520588 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b8cf525e-b1f8-447d-bde3-7a8746836e39-utilities\") pod \"certified-operators-c4dbm\" (UID: \"b8cf525e-b1f8-447d-bde3-7a8746836e39\") " pod="openshift-marketplace/certified-operators-c4dbm" Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.520746 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b8cf525e-b1f8-447d-bde3-7a8746836e39-catalog-content\") pod \"certified-operators-c4dbm\" (UID: \"b8cf525e-b1f8-447d-bde3-7a8746836e39\") " pod="openshift-marketplace/certified-operators-c4dbm" Nov 25 09:46:49 crc kubenswrapper[4769]: E1125 09:46:49.520884 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:50.020857476 +0000 UTC m=+158.605829789 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.581293 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qtmmr\" (UniqueName: \"kubernetes.io/projected/b8cf525e-b1f8-447d-bde3-7a8746836e39-kube-api-access-qtmmr\") pod \"certified-operators-c4dbm\" (UID: \"b8cf525e-b1f8-447d-bde3-7a8746836e39\") " pod="openshift-marketplace/certified-operators-c4dbm" Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.604585 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7m4kh"] Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.609799 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c4dbm" Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.621891 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.621982 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6-catalog-content\") pod \"community-operators-7m4kh\" (UID: \"1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6\") " pod="openshift-marketplace/community-operators-7m4kh" Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.622022 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6-utilities\") pod \"community-operators-7m4kh\" (UID: \"1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6\") " pod="openshift-marketplace/community-operators-7m4kh" Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.622058 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zccjr\" (UniqueName: \"kubernetes.io/projected/1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6-kube-api-access-zccjr\") pod \"community-operators-7m4kh\" (UID: \"1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6\") " pod="openshift-marketplace/community-operators-7m4kh" Nov 25 09:46:49 crc kubenswrapper[4769]: E1125 09:46:49.622442 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:50.122425379 +0000 UTC m=+158.707397692 (durationBeforeRetry 500ms). 
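Annotation: the "Observed pod startup duration" lines report podStartSLOduration and podStartE2EDuration. With both pull timestamps at the zero value (no image was pulled), the two are equal and are simply the span from podCreationTimestamp to the observed running time; for openshift-controller-manager-operator above, 09:44:37 to 09:46:49.129351411 is the logged 2m12.129351411s. The arithmetic, as a sketch with assumed field handling:

// Sketch of the startup-duration arithmetic: E2E time minus any image-pull
// window gives the SLO duration; zero-valued pull timestamps mean no pull.
package main

import (
	"fmt"
	"time"
)

func startupSLO(created, observedRunning, pullStart, pullEnd time.Time) time.Duration {
	d := observedRunning.Sub(created)
	if !pullStart.IsZero() && !pullEnd.IsZero() {
		d -= pullEnd.Sub(pullStart) // exclude time spent pulling images
	}
	return d
}

func main() {
	created, _ := time.Parse(time.RFC3339, "2025-11-25T09:44:37Z")
	observed, _ := time.Parse(time.RFC3339Nano, "2025-11-25T09:46:49.129351411Z")
	fmt.Println(startupSLO(created, observed, time.Time{}, time.Time{})) // 2m12.129351411s
}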
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.622656 4769 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-d7jmg container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.36:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.622691 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d7jmg" podUID="0a7bb2e3-eb5e-43a5-88f5-df93dff3afce" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.36:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.656185 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-hn7p6" event={"ID":"d8add56f-d9a7-4a9a-8f68-b7900166c27f","Type":"ContainerStarted","Data":"77c38b6d7883622bfd8862e837d6d2d085c891d06db76439aa43a87de70e2c3e"} Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.657504 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-hn7p6" Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.689341 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-skhr4" event={"ID":"c6742b80-a34d-4998-832f-29c88178b4ba","Type":"ContainerStarted","Data":"8b032fc03d7ed5d14fd8cdcefced627859529a73b502aee869780f0da42ca2d4"} Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.691479 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-sx97n" event={"ID":"86a2de7a-5314-47ef-b827-92050023a677","Type":"ContainerStarted","Data":"5946e8e20e59b2cf4fe3fb67009ad970b47eb884830e31bcaf5947a1596dbc14"} Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.693535 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-qmkgl"] Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.694893 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w82wc" event={"ID":"3ad7cc35-130e-4404-8a4f-173884ab4e41","Type":"ContainerStarted","Data":"cfcc4fe4020bddaed922aa01c58a0bcbfefb52549fee20c9dd90add5052bc0e3"} Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.705540 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qmkgl" Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.712544 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-hn7p6" podStartSLOduration=8.712513629 podStartE2EDuration="8.712513629s" podCreationTimestamp="2025-11-25 09:46:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:49.705173354 +0000 UTC m=+158.290145667" watchObservedRunningTime="2025-11-25 09:46:49.712513629 +0000 UTC m=+158.297485942" Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.718581 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-s289l" event={"ID":"99e5c770-c7b9-496d-93c7-bd8151e1bd49","Type":"ContainerStarted","Data":"1402836b4e43d979c27cebc5acc2ee28121c41bca1545d703d6dae82bd8c55f0"} Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.720120 4769 patch_prober.go:28] interesting pod/downloads-7954f5f757-l2kg2 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body= Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.720178 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-l2kg2" podUID="01b73250-0ba8-4c2d-9ada-dacc89a70a7d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.720127 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qmkgl"] Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.724852 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:46:49 crc kubenswrapper[4769]: E1125 09:46:49.725566 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:50.225546684 +0000 UTC m=+158.810518997 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.726159 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6-utilities\") pod \"community-operators-7m4kh\" (UID: \"1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6\") " pod="openshift-marketplace/community-operators-7m4kh" Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.726242 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zccjr\" (UniqueName: \"kubernetes.io/projected/1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6-kube-api-access-zccjr\") pod \"community-operators-7m4kh\" (UID: \"1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6\") " pod="openshift-marketplace/community-operators-7m4kh" Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.726316 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.726373 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6-catalog-content\") pod \"community-operators-7m4kh\" (UID: \"1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6\") " pod="openshift-marketplace/community-operators-7m4kh" Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.727443 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6-utilities\") pod \"community-operators-7m4kh\" (UID: \"1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6\") " pod="openshift-marketplace/community-operators-7m4kh" Nov 25 09:46:49 crc kubenswrapper[4769]: E1125 09:46:49.728929 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:50.22891982 +0000 UTC m=+158.813892133 (durationBeforeRetry 500ms). 
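Annotation: the probe failures logged a few entries back are plain HTTP GETs issued by the kubelet. The packageserver readiness probe times out awaiting headers ("Client.Timeout exceeded"), while the downloads pod refuses the connection outright. A minimal probe in the same spirit; the URL is taken from the log, the timeout and success range are illustrative assumptions:

// Minimal HTTP readiness probe: a GET that fails on dial errors, client
// timeouts, or an unhealthy status code.
package main

import (
	"fmt"
	"net/http"
	"time"
)

func probe(url string, timeout time.Duration) error {
	client := &http.Client{Timeout: timeout}
	resp, err := client.Get(url)
	if err != nil {
		return err // e.g. "connect: connection refused" or a client timeout
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		return fmt.Errorf("HTTP probe failed with statuscode: %d", resp.StatusCode)
	}
	return nil
}

func main() {
	if err := probe("http://10.217.0.13:8080/", 1*time.Second); err != nil {
		fmt.Println("Probe failed:", err)
	}
}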
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.729699 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6-catalog-content\") pod \"community-operators-7m4kh\" (UID: \"1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6\") " pod="openshift-marketplace/community-operators-7m4kh" Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.751704 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w82wc" podStartSLOduration=131.751683128 podStartE2EDuration="2m11.751683128s" podCreationTimestamp="2025-11-25 09:44:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:49.750598274 +0000 UTC m=+158.335570607" watchObservedRunningTime="2025-11-25 09:46:49.751683128 +0000 UTC m=+158.336655441" Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.788794 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zccjr\" (UniqueName: \"kubernetes.io/projected/1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6-kube-api-access-zccjr\") pod \"community-operators-7m4kh\" (UID: \"1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6\") " pod="openshift-marketplace/community-operators-7m4kh" Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.831598 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.832209 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c9095dc-a1a8-4e5c-a357-dae9aaf966e1-catalog-content\") pod \"certified-operators-qmkgl\" (UID: \"1c9095dc-a1a8-4e5c-a357-dae9aaf966e1\") " pod="openshift-marketplace/certified-operators-qmkgl" Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.832314 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c9095dc-a1a8-4e5c-a357-dae9aaf966e1-utilities\") pod \"certified-operators-qmkgl\" (UID: \"1c9095dc-a1a8-4e5c-a357-dae9aaf966e1\") " pod="openshift-marketplace/certified-operators-qmkgl" Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.832526 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2f7f5\" (UniqueName: \"kubernetes.io/projected/1c9095dc-a1a8-4e5c-a357-dae9aaf966e1-kube-api-access-2f7f5\") pod \"certified-operators-qmkgl\" (UID: \"1c9095dc-a1a8-4e5c-a357-dae9aaf966e1\") " pod="openshift-marketplace/certified-operators-qmkgl" Nov 25 09:46:49 crc kubenswrapper[4769]: E1125 09:46:49.834847 4769 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:50.334821598 +0000 UTC m=+158.919793911 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.879499 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7m4kh" Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.880156 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-skhr4" podStartSLOduration=131.880129493 podStartE2EDuration="2m11.880129493s" podCreationTimestamp="2025-11-25 09:44:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:49.843128422 +0000 UTC m=+158.428100736" watchObservedRunningTime="2025-11-25 09:46:49.880129493 +0000 UTC m=+158.465101806" Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.917113 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-x7wr9"] Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.946457 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.946501 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c9095dc-a1a8-4e5c-a357-dae9aaf966e1-catalog-content\") pod \"certified-operators-qmkgl\" (UID: \"1c9095dc-a1a8-4e5c-a357-dae9aaf966e1\") " pod="openshift-marketplace/certified-operators-qmkgl" Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.946532 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c9095dc-a1a8-4e5c-a357-dae9aaf966e1-utilities\") pod \"certified-operators-qmkgl\" (UID: \"1c9095dc-a1a8-4e5c-a357-dae9aaf966e1\") " pod="openshift-marketplace/certified-operators-qmkgl" Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.946566 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2f7f5\" (UniqueName: \"kubernetes.io/projected/1c9095dc-a1a8-4e5c-a357-dae9aaf966e1-kube-api-access-2f7f5\") pod \"certified-operators-qmkgl\" (UID: \"1c9095dc-a1a8-4e5c-a357-dae9aaf966e1\") " pod="openshift-marketplace/certified-operators-qmkgl" Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.948158 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/1c9095dc-a1a8-4e5c-a357-dae9aaf966e1-catalog-content\") pod \"certified-operators-qmkgl\" (UID: \"1c9095dc-a1a8-4e5c-a357-dae9aaf966e1\") " pod="openshift-marketplace/certified-operators-qmkgl" Nov 25 09:46:49 crc kubenswrapper[4769]: E1125 09:46:49.948566 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:50.44854894 +0000 UTC m=+159.033521253 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.948997 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c9095dc-a1a8-4e5c-a357-dae9aaf966e1-utilities\") pod \"certified-operators-qmkgl\" (UID: \"1c9095dc-a1a8-4e5c-a357-dae9aaf966e1\") " pod="openshift-marketplace/certified-operators-qmkgl" Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.950547 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-x7wr9"] Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.950843 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-x7wr9" Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.956172 4769 patch_prober.go:28] interesting pod/router-default-5444994796-zs9d4 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 09:46:49 crc kubenswrapper[4769]: [-]has-synced failed: reason withheld Nov 25 09:46:49 crc kubenswrapper[4769]: [+]process-running ok Nov 25 09:46:49 crc kubenswrapper[4769]: healthz check failed Nov 25 09:46:49 crc kubenswrapper[4769]: I1125 09:46:49.956239 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zs9d4" podUID="bfcd91d2-c8b0-4648-bcc5-207b11a54ece" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 09:46:50 crc kubenswrapper[4769]: I1125 09:46:50.019747 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2f7f5\" (UniqueName: \"kubernetes.io/projected/1c9095dc-a1a8-4e5c-a357-dae9aaf966e1-kube-api-access-2f7f5\") pod \"certified-operators-qmkgl\" (UID: \"1c9095dc-a1a8-4e5c-a357-dae9aaf966e1\") " pod="openshift-marketplace/certified-operators-qmkgl" Nov 25 09:46:50 crc kubenswrapper[4769]: I1125 09:46:50.036133 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-s289l" podStartSLOduration=133.036101258 podStartE2EDuration="2m13.036101258s" podCreationTimestamp="2025-11-25 09:44:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:50.015518599 +0000 UTC m=+158.600490912" watchObservedRunningTime="2025-11-25 
09:46:50.036101258 +0000 UTC m=+158.621073571" Nov 25 09:46:50 crc kubenswrapper[4769]: I1125 09:46:50.047582 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:46:50 crc kubenswrapper[4769]: I1125 09:46:50.047850 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f1f73c3a-3ade-470b-9e77-000116d1b631-utilities\") pod \"community-operators-x7wr9\" (UID: \"f1f73c3a-3ade-470b-9e77-000116d1b631\") " pod="openshift-marketplace/community-operators-x7wr9" Nov 25 09:46:50 crc kubenswrapper[4769]: I1125 09:46:50.047878 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f1f73c3a-3ade-470b-9e77-000116d1b631-catalog-content\") pod \"community-operators-x7wr9\" (UID: \"f1f73c3a-3ade-470b-9e77-000116d1b631\") " pod="openshift-marketplace/community-operators-x7wr9" Nov 25 09:46:50 crc kubenswrapper[4769]: I1125 09:46:50.047928 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6v9s8\" (UniqueName: \"kubernetes.io/projected/f1f73c3a-3ade-470b-9e77-000116d1b631-kube-api-access-6v9s8\") pod \"community-operators-x7wr9\" (UID: \"f1f73c3a-3ade-470b-9e77-000116d1b631\") " pod="openshift-marketplace/community-operators-x7wr9" Nov 25 09:46:50 crc kubenswrapper[4769]: E1125 09:46:50.048060 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:50.5480426 +0000 UTC m=+159.133014913 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:50 crc kubenswrapper[4769]: I1125 09:46:50.048190 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qmkgl" Nov 25 09:46:50 crc kubenswrapper[4769]: I1125 09:46:50.151045 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f1f73c3a-3ade-470b-9e77-000116d1b631-utilities\") pod \"community-operators-x7wr9\" (UID: \"f1f73c3a-3ade-470b-9e77-000116d1b631\") " pod="openshift-marketplace/community-operators-x7wr9" Nov 25 09:46:50 crc kubenswrapper[4769]: I1125 09:46:50.151323 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f1f73c3a-3ade-470b-9e77-000116d1b631-catalog-content\") pod \"community-operators-x7wr9\" (UID: \"f1f73c3a-3ade-470b-9e77-000116d1b631\") " pod="openshift-marketplace/community-operators-x7wr9" Nov 25 09:46:50 crc kubenswrapper[4769]: I1125 09:46:50.151359 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:50 crc kubenswrapper[4769]: I1125 09:46:50.151387 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6v9s8\" (UniqueName: \"kubernetes.io/projected/f1f73c3a-3ade-470b-9e77-000116d1b631-kube-api-access-6v9s8\") pod \"community-operators-x7wr9\" (UID: \"f1f73c3a-3ade-470b-9e77-000116d1b631\") " pod="openshift-marketplace/community-operators-x7wr9" Nov 25 09:46:50 crc kubenswrapper[4769]: I1125 09:46:50.152080 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f1f73c3a-3ade-470b-9e77-000116d1b631-utilities\") pod \"community-operators-x7wr9\" (UID: \"f1f73c3a-3ade-470b-9e77-000116d1b631\") " pod="openshift-marketplace/community-operators-x7wr9" Nov 25 09:46:50 crc kubenswrapper[4769]: I1125 09:46:50.152801 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f1f73c3a-3ade-470b-9e77-000116d1b631-catalog-content\") pod \"community-operators-x7wr9\" (UID: \"f1f73c3a-3ade-470b-9e77-000116d1b631\") " pod="openshift-marketplace/community-operators-x7wr9" Nov 25 09:46:50 crc kubenswrapper[4769]: E1125 09:46:50.153092 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:50.653080862 +0000 UTC m=+159.238053175 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:50 crc kubenswrapper[4769]: I1125 09:46:50.205830 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6v9s8\" (UniqueName: \"kubernetes.io/projected/f1f73c3a-3ade-470b-9e77-000116d1b631-kube-api-access-6v9s8\") pod \"community-operators-x7wr9\" (UID: \"f1f73c3a-3ade-470b-9e77-000116d1b631\") " pod="openshift-marketplace/community-operators-x7wr9" Nov 25 09:46:50 crc kubenswrapper[4769]: I1125 09:46:50.252106 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:46:50 crc kubenswrapper[4769]: E1125 09:46:50.252577 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:50.75254778 +0000 UTC m=+159.337520093 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:50 crc kubenswrapper[4769]: I1125 09:46:50.354092 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:50 crc kubenswrapper[4769]: E1125 09:46:50.354473 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:50.854460377 +0000 UTC m=+159.439432690 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:50 crc kubenswrapper[4769]: I1125 09:46:50.378346 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-x7wr9" Nov 25 09:46:50 crc kubenswrapper[4769]: I1125 09:46:50.417865 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-c4dbm"] Nov 25 09:46:50 crc kubenswrapper[4769]: I1125 09:46:50.463536 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:46:50 crc kubenswrapper[4769]: E1125 09:46:50.463934 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:50.963914667 +0000 UTC m=+159.548886980 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:50 crc kubenswrapper[4769]: I1125 09:46:50.565489 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:50 crc kubenswrapper[4769]: E1125 09:46:50.565880 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:51.065866946 +0000 UTC m=+159.650839259 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:50 crc kubenswrapper[4769]: I1125 09:46:50.635904 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7m4kh"] Nov 25 09:46:50 crc kubenswrapper[4769]: I1125 09:46:50.681277 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:46:50 crc kubenswrapper[4769]: E1125 09:46:50.681485 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:51.181451623 +0000 UTC m=+159.766423936 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:50 crc kubenswrapper[4769]: I1125 09:46:50.681644 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:50 crc kubenswrapper[4769]: E1125 09:46:50.682014 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:51.182000945 +0000 UTC m=+159.766973258 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:50 crc kubenswrapper[4769]: I1125 09:46:50.784809 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:46:50 crc kubenswrapper[4769]: E1125 09:46:50.785243 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:51.285223895 +0000 UTC m=+159.870196208 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:50 crc kubenswrapper[4769]: I1125 09:46:50.814432 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-sx97n" event={"ID":"86a2de7a-5314-47ef-b827-92050023a677","Type":"ContainerStarted","Data":"7cd62a60ebc8760512723e78123f6097881253dcc2d5b9bcc70dac2493b30496"} Nov 25 09:46:50 crc kubenswrapper[4769]: I1125 09:46:50.826362 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7m4kh" event={"ID":"1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6","Type":"ContainerStarted","Data":"257dc786b88284fdb335d6bb8b664924b92e294e08cd8b9dd79c5cc73ef9a47c"} Nov 25 09:46:50 crc kubenswrapper[4769]: I1125 09:46:50.835909 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c4dbm" event={"ID":"b8cf525e-b1f8-447d-bde3-7a8746836e39","Type":"ContainerStarted","Data":"16f618697d04c78e8cc801a034937f7a127461d56feb7f497c52ff84387864a0"} Nov 25 09:46:50 crc kubenswrapper[4769]: I1125 09:46:50.887915 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qmkgl"] Nov 25 09:46:50 crc kubenswrapper[4769]: I1125 09:46:50.892019 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:50 crc kubenswrapper[4769]: E1125 09:46:50.897251 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-25 09:46:51.397226088 +0000 UTC m=+159.982198401 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 09:46:50 crc kubenswrapper[4769]: W1125 09:46:50.927218 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1c9095dc_a1a8_4e5c_a357_dae9aaf966e1.slice/crio-b36b73ca0680f528d6be854b17587d8659daa59a509baffc66fbee88b8b8dcec WatchSource:0}: Error finding container b36b73ca0680f528d6be854b17587d8659daa59a509baffc66fbee88b8b8dcec: Status 404 returned error can't find the container with id b36b73ca0680f528d6be854b17587d8659daa59a509baffc66fbee88b8b8dcec Nov 25 09:46:50 crc kubenswrapper[4769]: I1125 09:46:50.958405 4769 patch_prober.go:28] interesting pod/router-default-5444994796-zs9d4 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 09:46:50 crc kubenswrapper[4769]: [-]has-synced failed: reason withheld Nov 25 09:46:50 crc kubenswrapper[4769]: [+]process-running ok Nov 25 09:46:50 crc kubenswrapper[4769]: healthz check failed Nov 25 09:46:50 crc kubenswrapper[4769]: I1125 09:46:50.958480 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zs9d4" podUID="bfcd91d2-c8b0-4648-bcc5-207b11a54ece" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 09:46:50 crc kubenswrapper[4769]: I1125 09:46:50.995486 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 09:46:50 crc kubenswrapper[4769]: E1125 09:46:50.996008 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:51.495983447 +0000 UTC m=+160.080955760 (durationBeforeRetry 500ms). 
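Annotation: the router startup-probe output above (and the openshift-apiserver livez listing further below) is the standard Kubernetes healthz format: one "[+]name ok" or "[-]name failed: reason withheld" line per check, then an overall verdict, with the probe recorded as a failure when the endpoint returns 500. A small aggregator in the same shape; check names are copied from the log, the types are illustrative:

// Small healthz-style aggregator producing the [+]/[-] per-check output and
// an overall verdict, as seen in the router probe failure.
package main

import "fmt"

type check struct {
	name string
	ok   bool
}

func healthz(checks []check) (string, bool) {
	out, healthy := "", true
	for _, c := range checks {
		if c.ok {
			out += fmt.Sprintf("[+]%s ok\n", c.name)
		} else {
			out += fmt.Sprintf("[-]%s failed: reason withheld\n", c.name)
			healthy = false
		}
	}
	if !healthy {
		out += "healthz check failed\n"
	}
	return out, healthy
}

func main() {
	body, ok := healthz([]check{
		{"backend-http", false},
		{"has-synced", false},
		{"process-running", true},
	})
	fmt.Print(body)
	if !ok {
		fmt.Println(`probeResult="failure" output="HTTP probe failed with statuscode: 500"`)
	}
}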
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.101087 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w"
Nov 25 09:46:51 crc kubenswrapper[4769]: E1125 09:46:51.101792 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:51.60177795 +0000 UTC m=+160.186750263 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.210002 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:46:51 crc kubenswrapper[4769]: E1125 09:46:51.212607 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:51.712581145 +0000 UTC m=+160.297553458 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.268272 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-7kqx5"]
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.269422 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7kqx5"
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.276443 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.295538 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7kqx5"]
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.314989 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w"
Nov 25 09:46:51 crc kubenswrapper[4769]: E1125 09:46:51.315361 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:51.815347176 +0000 UTC m=+160.400319479 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.410029 4769 patch_prober.go:28] interesting pod/apiserver-76f77b778f-shvhw container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Nov 25 09:46:51 crc kubenswrapper[4769]: [+]log ok
Nov 25 09:46:51 crc kubenswrapper[4769]: [+]etcd ok
Nov 25 09:46:51 crc kubenswrapper[4769]: [+]poststarthook/start-apiserver-admission-initializer ok
Nov 25 09:46:51 crc kubenswrapper[4769]: [+]poststarthook/generic-apiserver-start-informers ok
Nov 25 09:46:51 crc kubenswrapper[4769]: [+]poststarthook/max-in-flight-filter ok
Nov 25 09:46:51 crc kubenswrapper[4769]: [+]poststarthook/storage-object-count-tracker-hook ok
Nov 25 09:46:51 crc kubenswrapper[4769]: [+]poststarthook/image.openshift.io-apiserver-caches ok
Nov 25 09:46:51 crc kubenswrapper[4769]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld
Nov 25 09:46:51 crc kubenswrapper[4769]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld
Nov 25 09:46:51 crc kubenswrapper[4769]: [+]poststarthook/project.openshift.io-projectcache ok
Nov 25 09:46:51 crc kubenswrapper[4769]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok
Nov 25 09:46:51 crc kubenswrapper[4769]: [+]poststarthook/openshift.io-startinformers ok
Nov 25 09:46:51 crc kubenswrapper[4769]: [+]poststarthook/openshift.io-restmapperupdater ok
Nov 25 09:46:51 crc kubenswrapper[4769]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok
Nov 25 09:46:51 crc kubenswrapper[4769]: livez check failed
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.410099 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-shvhw" podUID="08c157ad-8d10-4284-a4e4-eb0b55a56542" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.416741 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.417038 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff08cdaf-0d49-4f4d-bc40-977a970ef1a5-catalog-content\") pod \"redhat-marketplace-7kqx5\" (UID: \"ff08cdaf-0d49-4f4d-bc40-977a970ef1a5\") " pod="openshift-marketplace/redhat-marketplace-7kqx5"
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.417088 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff08cdaf-0d49-4f4d-bc40-977a970ef1a5-utilities\") pod \"redhat-marketplace-7kqx5\" (UID: \"ff08cdaf-0d49-4f4d-bc40-977a970ef1a5\") " pod="openshift-marketplace/redhat-marketplace-7kqx5"
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.417148 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tbhv6\" (UniqueName: \"kubernetes.io/projected/ff08cdaf-0d49-4f4d-bc40-977a970ef1a5-kube-api-access-tbhv6\") pod \"redhat-marketplace-7kqx5\" (UID: \"ff08cdaf-0d49-4f4d-bc40-977a970ef1a5\") " pod="openshift-marketplace/redhat-marketplace-7kqx5"
Nov 25 09:46:51 crc kubenswrapper[4769]: E1125 09:46:51.417275 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:51.917257663 +0000 UTC m=+160.502229976 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
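The openshift-apiserver startup-probe failure above is worth unpacking: the probe body is an aggregated health report, one "[+]name ok" or "[-]name failed" line per registered check, and any failing check turns the whole endpoint into an HTTP 500, which prober.go then records. A minimal Go sketch of an endpoint in that style, assuming illustrative check names, port, and wiring (this is not the apiserver's actual implementation):

    // Sketch only: an aggregated livez-style endpoint. Each named check
    // prints "[+]name ok" or "[-]name failed"; any failure makes the
    // handler return HTTP 500, which the kubelet's prober records as
    // "HTTP probe failed with statuscode: 500".
    package main

    import (
    	"fmt"
    	"net/http"
    )

    type check struct {
    	name string
    	run  func() error
    }

    func livezHandler(checks []check) http.HandlerFunc {
    	return func(w http.ResponseWriter, r *http.Request) {
    		body := ""
    		failed := false
    		for _, c := range checks {
    			if err := c.run(); err != nil {
    				// The real output withholds the reason; mirror that here.
    				body += fmt.Sprintf("[-]%s failed: reason withheld\n", c.name)
    				failed = true
    			} else {
    				body += fmt.Sprintf("[+]%s ok\n", c.name)
    			}
    		}
    		if failed {
    			body += "livez check failed\n"
    			w.WriteHeader(http.StatusInternalServerError) // probe sees 500
    		}
    		fmt.Fprint(w, body)
    	}
    }

    func main() {
    	checks := []check{ // illustrative checks, not the apiserver's
    		{"ping", func() error { return nil }},
    		{"etcd", func() error { return nil }},
    	}
    	http.ListenAndServe(":8080", livezHandler(checks))
    }
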
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.509420 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-x7wr9"]
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.518502 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tbhv6\" (UniqueName: \"kubernetes.io/projected/ff08cdaf-0d49-4f4d-bc40-977a970ef1a5-kube-api-access-tbhv6\") pod \"redhat-marketplace-7kqx5\" (UID: \"ff08cdaf-0d49-4f4d-bc40-977a970ef1a5\") " pod="openshift-marketplace/redhat-marketplace-7kqx5"
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.518564 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w"
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.518606 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff08cdaf-0d49-4f4d-bc40-977a970ef1a5-catalog-content\") pod \"redhat-marketplace-7kqx5\" (UID: \"ff08cdaf-0d49-4f4d-bc40-977a970ef1a5\") " pod="openshift-marketplace/redhat-marketplace-7kqx5"
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.518638 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff08cdaf-0d49-4f4d-bc40-977a970ef1a5-utilities\") pod \"redhat-marketplace-7kqx5\" (UID: \"ff08cdaf-0d49-4f4d-bc40-977a970ef1a5\") " pod="openshift-marketplace/redhat-marketplace-7kqx5"
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.519142 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff08cdaf-0d49-4f4d-bc40-977a970ef1a5-utilities\") pod \"redhat-marketplace-7kqx5\" (UID: \"ff08cdaf-0d49-4f4d-bc40-977a970ef1a5\") " pod="openshift-marketplace/redhat-marketplace-7kqx5"
Nov 25 09:46:51 crc kubenswrapper[4769]: E1125 09:46:51.519646 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:52.019621458 +0000 UTC m=+160.604593771 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.519659 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff08cdaf-0d49-4f4d-bc40-977a970ef1a5-catalog-content\") pod \"redhat-marketplace-7kqx5\" (UID: \"ff08cdaf-0d49-4f4d-bc40-977a970ef1a5\") " pod="openshift-marketplace/redhat-marketplace-7kqx5"
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.579260 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tbhv6\" (UniqueName: \"kubernetes.io/projected/ff08cdaf-0d49-4f4d-bc40-977a970ef1a5-kube-api-access-tbhv6\") pod \"redhat-marketplace-7kqx5\" (UID: \"ff08cdaf-0d49-4f4d-bc40-977a970ef1a5\") " pod="openshift-marketplace/redhat-marketplace-7kqx5"
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.616311 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7kqx5"
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.619279 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:46:51 crc kubenswrapper[4769]: E1125 09:46:51.619458 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:52.1194294 +0000 UTC m=+160.704401723 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.619601 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w"
Nov 25 09:46:51 crc kubenswrapper[4769]: E1125 09:46:51.620026 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:52.120012683 +0000 UTC m=+160.704985016 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
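The nestedpendingoperations entries above show how the kubelet serializes retries: each failed mount or unmount stamps the operation with a "No retries permitted until" deadline before the reconciler may try again; the log shows the initial 500ms durationBeforeRetry. A sketch of that gating in Go, assuming a doubling backoff with a 2-minute cap (the growth factor and cap are assumptions; only the 500ms initial step appears in the log):

    // Sketch only: per-operation retry embargo in the style of the
    // "No retries permitted until ... (durationBeforeRetry 500ms)" lines.
    package main

    import (
    	"errors"
    	"fmt"
    	"time"
    )

    type expBackoff struct {
    	lastErrorTime time.Time
    	duration      time.Duration
    }

    // next records a failure and returns how long retries stay forbidden.
    func (b *expBackoff) next(now time.Time) time.Duration {
    	if b.duration == 0 {
    		b.duration = 500 * time.Millisecond // first retry delay, as in the log
    	} else {
    		b.duration *= 2 // assumed doubling
    		if limit := 2 * time.Minute; b.duration > limit {
    			b.duration = limit // assumed cap
    		}
    	}
    	b.lastErrorTime = now
    	return b.duration
    }

    // allowed reports whether a new attempt may start yet.
    func (b *expBackoff) allowed(now time.Time) error {
    	until := b.lastErrorTime.Add(b.duration)
    	if now.Before(until) {
    		return errors.New("no retries permitted until " + until.Format(time.RFC3339Nano))
    	}
    	return nil
    }

    func main() {
    	var b expBackoff
    	now := time.Now()
    	fmt.Println(b.next(now))    // 500ms
    	fmt.Println(b.allowed(now)) // still embargoed
    	fmt.Println(b.next(now))    // 1s on the next failure
    }
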
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.663227 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-l662h"]
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.665768 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l662h"
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.691747 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-l662h"]
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.722034 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.722587 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a51756e2-325b-4979-9e50-2a5cf4ca302f-utilities\") pod \"redhat-marketplace-l662h\" (UID: \"a51756e2-325b-4979-9e50-2a5cf4ca302f\") " pod="openshift-marketplace/redhat-marketplace-l662h"
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.722766 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a51756e2-325b-4979-9e50-2a5cf4ca302f-catalog-content\") pod \"redhat-marketplace-l662h\" (UID: \"a51756e2-325b-4979-9e50-2a5cf4ca302f\") " pod="openshift-marketplace/redhat-marketplace-l662h"
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.723081 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kzflc\" (UniqueName: \"kubernetes.io/projected/a51756e2-325b-4979-9e50-2a5cf4ca302f-kube-api-access-kzflc\") pod \"redhat-marketplace-l662h\" (UID: \"a51756e2-325b-4979-9e50-2a5cf4ca302f\") " pod="openshift-marketplace/redhat-marketplace-l662h"
Nov 25 09:46:51 crc kubenswrapper[4769]: E1125 09:46:51.723295 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:52.223275154 +0000 UTC m=+160.808247467 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.806177 4769 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock"
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.825067 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a51756e2-325b-4979-9e50-2a5cf4ca302f-utilities\") pod \"redhat-marketplace-l662h\" (UID: \"a51756e2-325b-4979-9e50-2a5cf4ca302f\") " pod="openshift-marketplace/redhat-marketplace-l662h"
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.825114 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a51756e2-325b-4979-9e50-2a5cf4ca302f-catalog-content\") pod \"redhat-marketplace-l662h\" (UID: \"a51756e2-325b-4979-9e50-2a5cf4ca302f\") " pod="openshift-marketplace/redhat-marketplace-l662h"
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.825154 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w"
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.825192 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kzflc\" (UniqueName: \"kubernetes.io/projected/a51756e2-325b-4979-9e50-2a5cf4ca302f-kube-api-access-kzflc\") pod \"redhat-marketplace-l662h\" (UID: \"a51756e2-325b-4979-9e50-2a5cf4ca302f\") " pod="openshift-marketplace/redhat-marketplace-l662h"
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.825872 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a51756e2-325b-4979-9e50-2a5cf4ca302f-utilities\") pod \"redhat-marketplace-l662h\" (UID: \"a51756e2-325b-4979-9e50-2a5cf4ca302f\") " pod="openshift-marketplace/redhat-marketplace-l662h"
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.826113 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a51756e2-325b-4979-9e50-2a5cf4ca302f-catalog-content\") pod \"redhat-marketplace-l662h\" (UID: \"a51756e2-325b-4979-9e50-2a5cf4ca302f\") " pod="openshift-marketplace/redhat-marketplace-l662h"
Nov 25 09:46:51 crc kubenswrapper[4769]: E1125 09:46:51.826400 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:52.326387939 +0000 UTC m=+160.911360252 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
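The plugin_watcher.go entry above is the first step of plugin discovery: a filesystem watch on /var/lib/kubelet/plugins_registry notices the driver's registration socket and records it in a desired-state cache; the RegisterPlugin and validate lines that follow later consume that cache. A simplified fsnotify-based sketch of the watch-and-cache step (the desiredState map and the *.sock filter are illustrative, not the kubelet's actual code):

    // Sketch only: watch the plugins_registry directory and stamp every
    // new *.sock into a desired-state cache, in the spirit of the
    // plugin_watcher.go line above.
    package main

    import (
    	"log"
    	"path/filepath"
    	"time"

    	"github.com/fsnotify/fsnotify"
    )

    func main() {
    	desiredState := map[string]time.Time{} // socket path -> discovery timestamp

    	w, err := fsnotify.NewWatcher()
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer w.Close()
    	if err := w.Add("/var/lib/kubelet/plugins_registry"); err != nil {
    		log.Fatal(err)
    	}

    	for ev := range w.Events {
    		// Only creations of registration sockets are interesting here.
    		if ev.Op&fsnotify.Create != 0 && filepath.Ext(ev.Name) == ".sock" {
    			desiredState[ev.Name] = time.Now()
    			log.Printf("Adding socket path or updating timestamp to desired state cache path=%q", ev.Name)
    		}
    	}
    }
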
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.848765 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x7wr9" event={"ID":"f1f73c3a-3ade-470b-9e77-000116d1b631","Type":"ContainerStarted","Data":"e35295c12828caa0c01012f94aad1e2010b2f54dff2d00656b24b17f84730206"}
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.848819 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x7wr9" event={"ID":"f1f73c3a-3ade-470b-9e77-000116d1b631","Type":"ContainerStarted","Data":"c6cb8737bbf99da5e6f26c307c7404be98d80c43881793a90b4cc3a26df66d24"}
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.855432 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kzflc\" (UniqueName: \"kubernetes.io/projected/a51756e2-325b-4979-9e50-2a5cf4ca302f-kube-api-access-kzflc\") pod \"redhat-marketplace-l662h\" (UID: \"a51756e2-325b-4979-9e50-2a5cf4ca302f\") " pod="openshift-marketplace/redhat-marketplace-l662h"
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.875072 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-sx97n" event={"ID":"86a2de7a-5314-47ef-b827-92050023a677","Type":"ContainerStarted","Data":"ed8e1d525df984f4d4a232d0708819bdb08d676c7054c86408300c68691e6f57"}
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.875387 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-sx97n" event={"ID":"86a2de7a-5314-47ef-b827-92050023a677","Type":"ContainerStarted","Data":"1a1e7e8d1fb82a3bef1bf39b75e4b6aa0027503101683d1abcc092dbf6a4bc77"}
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.884603 4769 generic.go:334] "Generic (PLEG): container finished" podID="1c9095dc-a1a8-4e5c-a357-dae9aaf966e1" containerID="009db9fdf743e5c00055bd57aa7943218b5e407657e21873df38b85057ce7480" exitCode=0
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.884684 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qmkgl" event={"ID":"1c9095dc-a1a8-4e5c-a357-dae9aaf966e1","Type":"ContainerDied","Data":"009db9fdf743e5c00055bd57aa7943218b5e407657e21873df38b85057ce7480"}
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.884717 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qmkgl" event={"ID":"1c9095dc-a1a8-4e5c-a357-dae9aaf966e1","Type":"ContainerStarted","Data":"b36b73ca0680f528d6be854b17587d8659daa59a509baffc66fbee88b8b8dcec"}
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.890628 4769 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.893876 4769 generic.go:334] "Generic (PLEG): container finished" podID="1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6" containerID="0bc46cb34b08e98904adb0da4cb4635afbce4f71893a2e07343284a86268f8bd" exitCode=0
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.893935 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7m4kh" event={"ID":"1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6","Type":"ContainerDied","Data":"0bc46cb34b08e98904adb0da4cb4635afbce4f71893a2e07343284a86268f8bd"}
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.902885 4769 generic.go:334] "Generic (PLEG): container finished" podID="b8cf525e-b1f8-447d-bde3-7a8746836e39" containerID="7b6e771a1d7c47975b55ae24ad93520f1bd8aec25575ac25091a5d283bbfc09b" exitCode=0
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.903011 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c4dbm" event={"ID":"b8cf525e-b1f8-447d-bde3-7a8746836e39","Type":"ContainerDied","Data":"7b6e771a1d7c47975b55ae24ad93520f1bd8aec25575ac25091a5d283bbfc09b"}
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.926535 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:46:51 crc kubenswrapper[4769]: E1125 09:46:51.928160 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:52.428140119 +0000 UTC m=+161.013112432 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.942658 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-sx97n" podStartSLOduration=10.942637113 podStartE2EDuration="10.942637113s" podCreationTimestamp="2025-11-25 09:46:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:51.942364472 +0000 UTC m=+160.527336795" watchObservedRunningTime="2025-11-25 09:46:51.942637113 +0000 UTC m=+160.527609426"
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.948579 4769 patch_prober.go:28] interesting pod/router-default-5444994796-zs9d4 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 25 09:46:51 crc kubenswrapper[4769]: [-]has-synced failed: reason withheld
Nov 25 09:46:51 crc kubenswrapper[4769]: [+]process-running ok
Nov 25 09:46:51 crc kubenswrapper[4769]: healthz check failed
Nov 25 09:46:51 crc kubenswrapper[4769]: I1125 09:46:51.948995 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zs9d4" podUID="bfcd91d2-c8b0-4648-bcc5-207b11a54ece" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.030880 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w"
Nov 25 09:46:52 crc kubenswrapper[4769]: E1125 09:46:52.032468 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:52.532452803 +0000 UTC m=+161.117425116 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.034500 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l662h"
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.133773 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:46:52 crc kubenswrapper[4769]: E1125 09:46:52.134297 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:52.634278596 +0000 UTC m=+161.219250909 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.169952 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.177062 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.177236 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.183458 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt"
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.189670 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n"
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.235335 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9ccae56f-65fc-4548-a179-e4fbd7b24716-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"9ccae56f-65fc-4548-a179-e4fbd7b24716\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.235398 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/9ccae56f-65fc-4548-a179-e4fbd7b24716-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"9ccae56f-65fc-4548-a179-e4fbd7b24716\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.235482 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w"
Nov 25 09:46:52 crc kubenswrapper[4769]: E1125 09:46:52.235866 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:52.735834298 +0000 UTC m=+161.320806611 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.264652 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7kqx5"]
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.290735 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.290929 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.338701 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.339025 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/9ccae56f-65fc-4548-a179-e4fbd7b24716-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"9ccae56f-65fc-4548-a179-e4fbd7b24716\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.339832 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9ccae56f-65fc-4548-a179-e4fbd7b24716-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"9ccae56f-65fc-4548-a179-e4fbd7b24716\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 09:46:52 crc kubenswrapper[4769]: E1125 09:46:52.340328 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 09:46:52.840312038 +0000 UTC m=+161.425284351 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.340442 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/9ccae56f-65fc-4548-a179-e4fbd7b24716-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"9ccae56f-65fc-4548-a179-e4fbd7b24716\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.405075 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9ccae56f-65fc-4548-a179-e4fbd7b24716-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"9ccae56f-65fc-4548-a179-e4fbd7b24716\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.415056 4769 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-11-25T09:46:51.806211946Z","Handler":null,"Name":""}
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.443138 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w"
Nov 25 09:46:52 crc kubenswrapper[4769]: E1125 09:46:52.443529 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 09:46:52.943516487 +0000 UTC m=+161.528488800 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-xrd6w" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.446531 4769 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.446570 4769 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.488495 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-gfmk8"]
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.509903 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gfmk8"
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.511582 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gfmk8"]
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.511932 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.543900 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.559529 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
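Taken together, the lines above explain the long run of "driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers" errors: every MountDevice and TearDownAt first resolves the driver name against the kubelet's in-memory registry of CSI plugins, and only the csi_plugin.go validate/register step just logged makes that lookup start succeeding. A hedged Go sketch of such a name-to-driver registry (the types are illustrative stand-ins, not kubelet internals):

    // Sketch only: lookups fail until the registration path inserts the
    // driver, mirroring the error text seen throughout this log.
    package main

    import (
    	"errors"
    	"fmt"
    	"sync"
    )

    type csiDriver struct{ endpoint, version string }

    type driverRegistry struct {
    	mu      sync.RWMutex
    	drivers map[string]csiDriver
    }

    func (r *driverRegistry) register(name, endpoint, version string) error {
    	if name == "" || endpoint == "" {
    		return errors.New("driver name and endpoint are required")
    	}
    	r.mu.Lock()
    	defer r.mu.Unlock()
    	r.drivers[name] = csiDriver{endpoint, version}
    	return nil
    }

    func (r *driverRegistry) client(name string) (csiDriver, error) {
    	r.mu.RLock()
    	defer r.mu.RUnlock()
    	d, ok := r.drivers[name]
    	if !ok {
    		return csiDriver{}, fmt.Errorf("driver name %s not found in the list of registered CSI drivers", name)
    	}
    	return d, nil
    }

    func main() {
    	reg := &driverRegistry{drivers: map[string]csiDriver{}}
    	if _, err := reg.client("kubevirt.io.hostpath-provisioner"); err != nil {
    		fmt.Println(err) // the error repeated above
    	}
    	reg.register("kubevirt.io.hostpath-provisioner", "/var/lib/kubelet/plugins/csi-hostpath/csi.sock", "1.0.0")
    	if _, err := reg.client("kubevirt.io.hostpath-provisioner"); err == nil {
    		fmt.Println("mounts can proceed") // matches the MountDevice success that follows
    	}
    }
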
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.641622 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-bd82s"
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.645448 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fs2xl\" (UniqueName: \"kubernetes.io/projected/51cc9569-8a6c-473c-a6c7-628c4c7e1aed-kube-api-access-fs2xl\") pod \"redhat-operators-gfmk8\" (UID: \"51cc9569-8a6c-473c-a6c7-628c4c7e1aed\") " pod="openshift-marketplace/redhat-operators-gfmk8"
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.645547 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51cc9569-8a6c-473c-a6c7-628c4c7e1aed-catalog-content\") pod \"redhat-operators-gfmk8\" (UID: \"51cc9569-8a6c-473c-a6c7-628c4c7e1aed\") " pod="openshift-marketplace/redhat-operators-gfmk8"
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.645590 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/51cc9569-8a6c-473c-a6c7-628c4c7e1aed-utilities\") pod \"redhat-operators-gfmk8\" (UID: \"51cc9569-8a6c-473c-a6c7-628c4c7e1aed\") " pod="openshift-marketplace/redhat-operators-gfmk8"
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.703478 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-l662h"]
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.756880 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fs2xl\" (UniqueName: \"kubernetes.io/projected/51cc9569-8a6c-473c-a6c7-628c4c7e1aed-kube-api-access-fs2xl\") pod \"redhat-operators-gfmk8\" (UID: \"51cc9569-8a6c-473c-a6c7-628c4c7e1aed\") " pod="openshift-marketplace/redhat-operators-gfmk8"
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.757445 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51cc9569-8a6c-473c-a6c7-628c4c7e1aed-catalog-content\") pod \"redhat-operators-gfmk8\" (UID: \"51cc9569-8a6c-473c-a6c7-628c4c7e1aed\") " pod="openshift-marketplace/redhat-operators-gfmk8"
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.757502 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/51cc9569-8a6c-473c-a6c7-628c4c7e1aed-utilities\") pod \"redhat-operators-gfmk8\" (UID: \"51cc9569-8a6c-473c-a6c7-628c4c7e1aed\") " pod="openshift-marketplace/redhat-operators-gfmk8"
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.758454 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/51cc9569-8a6c-473c-a6c7-628c4c7e1aed-utilities\") pod \"redhat-operators-gfmk8\" (UID: \"51cc9569-8a6c-473c-a6c7-628c4c7e1aed\") " pod="openshift-marketplace/redhat-operators-gfmk8"
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.766156 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51cc9569-8a6c-473c-a6c7-628c4c7e1aed-catalog-content\") pod \"redhat-operators-gfmk8\" (UID: \"51cc9569-8a6c-473c-a6c7-628c4c7e1aed\") " pod="openshift-marketplace/redhat-operators-gfmk8"
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.768077 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue ""
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.802855 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fs2xl\" (UniqueName: \"kubernetes.io/projected/51cc9569-8a6c-473c-a6c7-628c4c7e1aed-kube-api-access-fs2xl\") pod \"redhat-operators-gfmk8\" (UID: \"51cc9569-8a6c-473c-a6c7-628c4c7e1aed\") " pod="openshift-marketplace/redhat-operators-gfmk8"
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.858870 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w"
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.878002 4769 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.878046 4769 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w"
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.881074 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gfmk8"
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.903421 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-ccfz8"]
Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.904901 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ccfz8"
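The csi_attacher.go line above records a capability gate: because the hostpath driver does not advertise STAGE_UNSTAGE_VOLUME, the attacher skips the node-staging step and reports MountDevice as succeeded so that the later per-pod SetUp can do all the work. A minimal sketch of that decision, assuming a hypothetical nodePlugin interface in place of the real CSI NodeGetCapabilities RPC:

    // Sketch only: stage the volume only when the plugin supports staging.
    package main

    import "fmt"

    type capability string

    const capStageUnstage capability = "STAGE_UNSTAGE_VOLUME"

    type nodePlugin interface {
    	Capabilities() []capability
    	StageVolume(volumeID, stagingPath string) error
    }

    // mountDevice skips NodeStageVolume for plugins that do not advertise
    // STAGE_UNSTAGE_VOLUME; the skip still counts as success, as in the log.
    func mountDevice(p nodePlugin, volumeID, stagingPath string) error {
    	supported := false
    	for _, c := range p.Capabilities() {
    		if c == capStageUnstage {
    			supported = true
    			break
    		}
    	}
    	if !supported {
    		fmt.Println("STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...")
    		return nil // reported as "MountVolume.MountDevice succeeded"
    	}
    	return p.StageVolume(volumeID, stagingPath)
    }

    type hostpath struct{}

    func (hostpath) Capabilities() []capability    { return nil } // no staging support
    func (hostpath) StageVolume(_, _ string) error { return nil }

    func main() {
    	// Illustrative IDs and path, not taken from the log.
    	_ = mountDevice(hostpath{}, "pvc-example", "/tmp/globalmount")
    }
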
Need to start a new one" pod="openshift-marketplace/redhat-operators-ccfz8" Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.931084 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ccfz8"] Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.946221 4769 generic.go:334] "Generic (PLEG): container finished" podID="f1f73c3a-3ade-470b-9e77-000116d1b631" containerID="e35295c12828caa0c01012f94aad1e2010b2f54dff2d00656b24b17f84730206" exitCode=0 Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.946697 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x7wr9" event={"ID":"f1f73c3a-3ade-470b-9e77-000116d1b631","Type":"ContainerDied","Data":"e35295c12828caa0c01012f94aad1e2010b2f54dff2d00656b24b17f84730206"} Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.954543 4769 patch_prober.go:28] interesting pod/router-default-5444994796-zs9d4 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 09:46:52 crc kubenswrapper[4769]: [-]has-synced failed: reason withheld Nov 25 09:46:52 crc kubenswrapper[4769]: [+]process-running ok Nov 25 09:46:52 crc kubenswrapper[4769]: healthz check failed Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.954624 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zs9d4" podUID="bfcd91d2-c8b0-4648-bcc5-207b11a54ece" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.968009 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/71d2d50a-7ac8-42a1-ac73-4709f6e57ada-utilities\") pod \"redhat-operators-ccfz8\" (UID: \"71d2d50a-7ac8-42a1-ac73-4709f6e57ada\") " pod="openshift-marketplace/redhat-operators-ccfz8" Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.968213 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/71d2d50a-7ac8-42a1-ac73-4709f6e57ada-catalog-content\") pod \"redhat-operators-ccfz8\" (UID: \"71d2d50a-7ac8-42a1-ac73-4709f6e57ada\") " pod="openshift-marketplace/redhat-operators-ccfz8" Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.968241 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2v8d6\" (UniqueName: \"kubernetes.io/projected/71d2d50a-7ac8-42a1-ac73-4709f6e57ada-kube-api-access-2v8d6\") pod \"redhat-operators-ccfz8\" (UID: \"71d2d50a-7ac8-42a1-ac73-4709f6e57ada\") " pod="openshift-marketplace/redhat-operators-ccfz8" Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.987309 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-xrd6w\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.988039 4769 generic.go:334] "Generic (PLEG): container finished" podID="21dad565-8414-4796-b777-e63f27b9c666" 
containerID="039423952c8f0d9590aaeeab3b5c963aa92c26970c18c88a2a596c36db833521" exitCode=0 Nov 25 09:46:52 crc kubenswrapper[4769]: I1125 09:46:52.988164 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-bbzl9" event={"ID":"21dad565-8414-4796-b777-e63f27b9c666","Type":"ContainerDied","Data":"039423952c8f0d9590aaeeab3b5c963aa92c26970c18c88a2a596c36db833521"} Nov 25 09:46:53 crc kubenswrapper[4769]: I1125 09:46:53.027820 4769 generic.go:334] "Generic (PLEG): container finished" podID="ff08cdaf-0d49-4f4d-bc40-977a970ef1a5" containerID="93ff5604c514891dc5a4e7491656d61129abb396c8d51aa117ed19503fc5b1e1" exitCode=0 Nov 25 09:46:53 crc kubenswrapper[4769]: I1125 09:46:53.028718 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7kqx5" event={"ID":"ff08cdaf-0d49-4f4d-bc40-977a970ef1a5","Type":"ContainerDied","Data":"93ff5604c514891dc5a4e7491656d61129abb396c8d51aa117ed19503fc5b1e1"} Nov 25 09:46:53 crc kubenswrapper[4769]: I1125 09:46:53.028756 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7kqx5" event={"ID":"ff08cdaf-0d49-4f4d-bc40-977a970ef1a5","Type":"ContainerStarted","Data":"e8b3bbf28682918836fe5a6f97b3e6515b735117231c6bad1fea4b11254e9fdc"} Nov 25 09:46:53 crc kubenswrapper[4769]: I1125 09:46:53.043263 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l662h" event={"ID":"a51756e2-325b-4979-9e50-2a5cf4ca302f","Type":"ContainerStarted","Data":"9b7aedf0afe559df148da62554d283eca7e1dc3e713c3dae96c676cc853aa952"} Nov 25 09:46:53 crc kubenswrapper[4769]: I1125 09:46:53.069675 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2v8d6\" (UniqueName: \"kubernetes.io/projected/71d2d50a-7ac8-42a1-ac73-4709f6e57ada-kube-api-access-2v8d6\") pod \"redhat-operators-ccfz8\" (UID: \"71d2d50a-7ac8-42a1-ac73-4709f6e57ada\") " pod="openshift-marketplace/redhat-operators-ccfz8" Nov 25 09:46:53 crc kubenswrapper[4769]: I1125 09:46:53.069728 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/71d2d50a-7ac8-42a1-ac73-4709f6e57ada-catalog-content\") pod \"redhat-operators-ccfz8\" (UID: \"71d2d50a-7ac8-42a1-ac73-4709f6e57ada\") " pod="openshift-marketplace/redhat-operators-ccfz8" Nov 25 09:46:53 crc kubenswrapper[4769]: I1125 09:46:53.069846 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/71d2d50a-7ac8-42a1-ac73-4709f6e57ada-utilities\") pod \"redhat-operators-ccfz8\" (UID: \"71d2d50a-7ac8-42a1-ac73-4709f6e57ada\") " pod="openshift-marketplace/redhat-operators-ccfz8" Nov 25 09:46:53 crc kubenswrapper[4769]: I1125 09:46:53.070596 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/71d2d50a-7ac8-42a1-ac73-4709f6e57ada-utilities\") pod \"redhat-operators-ccfz8\" (UID: \"71d2d50a-7ac8-42a1-ac73-4709f6e57ada\") " pod="openshift-marketplace/redhat-operators-ccfz8" Nov 25 09:46:53 crc kubenswrapper[4769]: I1125 09:46:53.071268 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/71d2d50a-7ac8-42a1-ac73-4709f6e57ada-catalog-content\") pod \"redhat-operators-ccfz8\" (UID: \"71d2d50a-7ac8-42a1-ac73-4709f6e57ada\") " 
pod="openshift-marketplace/redhat-operators-ccfz8" Nov 25 09:46:53 crc kubenswrapper[4769]: I1125 09:46:53.097625 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2v8d6\" (UniqueName: \"kubernetes.io/projected/71d2d50a-7ac8-42a1-ac73-4709f6e57ada-kube-api-access-2v8d6\") pod \"redhat-operators-ccfz8\" (UID: \"71d2d50a-7ac8-42a1-ac73-4709f6e57ada\") " pod="openshift-marketplace/redhat-operators-ccfz8" Nov 25 09:46:53 crc kubenswrapper[4769]: I1125 09:46:53.251638 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:46:53 crc kubenswrapper[4769]: E1125 09:46:53.325838 4769 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda51756e2_325b_4979_9e50_2a5cf4ca302f.slice/crio-ef10e7a9ea34ddb131a14d6d99702e27b8f0476d5d0bf6a2e2057c5444dc7ee7.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda51756e2_325b_4979_9e50_2a5cf4ca302f.slice/crio-conmon-ef10e7a9ea34ddb131a14d6d99702e27b8f0476d5d0bf6a2e2057c5444dc7ee7.scope\": RecentStats: unable to find data in memory cache]" Nov 25 09:46:53 crc kubenswrapper[4769]: I1125 09:46:53.331516 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-ghgwj" Nov 25 09:46:53 crc kubenswrapper[4769]: I1125 09:46:53.333845 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-ghgwj" Nov 25 09:46:53 crc kubenswrapper[4769]: I1125 09:46:53.341060 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 25 09:46:53 crc kubenswrapper[4769]: I1125 09:46:53.343213 4769 patch_prober.go:28] interesting pod/console-f9d7485db-ghgwj container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.7:8443/health\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body= Nov 25 09:46:53 crc kubenswrapper[4769]: I1125 09:46:53.343272 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-ghgwj" podUID="2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4" containerName="console" probeResult="failure" output="Get \"https://10.217.0.7:8443/health\": dial tcp 10.217.0.7:8443: connect: connection refused" Nov 25 09:46:53 crc kubenswrapper[4769]: I1125 09:46:53.381298 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-ccfz8" Nov 25 09:46:53 crc kubenswrapper[4769]: I1125 09:46:53.471126 4769 patch_prober.go:28] interesting pod/downloads-7954f5f757-l2kg2 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body= Nov 25 09:46:53 crc kubenswrapper[4769]: I1125 09:46:53.471195 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-l2kg2" podUID="01b73250-0ba8-4c2d-9ada-dacc89a70a7d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" Nov 25 09:46:53 crc kubenswrapper[4769]: I1125 09:46:53.472772 4769 patch_prober.go:28] interesting pod/downloads-7954f5f757-l2kg2 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body= Nov 25 09:46:53 crc kubenswrapper[4769]: I1125 09:46:53.472832 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-l2kg2" podUID="01b73250-0ba8-4c2d-9ada-dacc89a70a7d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" Nov 25 09:46:53 crc kubenswrapper[4769]: I1125 09:46:53.505413 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gfmk8"] Nov 25 09:46:53 crc kubenswrapper[4769]: W1125 09:46:53.529732 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod51cc9569_8a6c_473c_a6c7_628c4c7e1aed.slice/crio-f8442b261fa35fc207e0b706f146b2935cf1870321c925767662322f45df0cc0 WatchSource:0}: Error finding container f8442b261fa35fc207e0b706f146b2935cf1870321c925767662322f45df0cc0: Status 404 returned error can't find the container with id f8442b261fa35fc207e0b706f146b2935cf1870321c925767662322f45df0cc0 Nov 25 09:46:53 crc kubenswrapper[4769]: I1125 09:46:53.536469 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-shvhw" Nov 25 09:46:53 crc kubenswrapper[4769]: I1125 09:46:53.546338 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-shvhw" Nov 25 09:46:53 crc kubenswrapper[4769]: I1125 09:46:53.630267 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-xrd6w"] Nov 25 09:46:53 crc kubenswrapper[4769]: W1125 09:46:53.651068 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfb4b7e91_71e7_4719_b0c5_35d132cf6115.slice/crio-817d608979207d4a2be3456a1413c862b9719c56296bb1aadf07c913375c6e10 WatchSource:0}: Error finding container 817d608979207d4a2be3456a1413c862b9719c56296bb1aadf07c913375c6e10: Status 404 returned error can't find the container with id 817d608979207d4a2be3456a1413c862b9719c56296bb1aadf07c913375c6e10 Nov 25 09:46:53 crc kubenswrapper[4769]: I1125 09:46:53.654997 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w82wc" Nov 25 09:46:53 crc kubenswrapper[4769]: I1125 09:46:53.655044 4769 
Nov 25 09:46:53 crc kubenswrapper[4769]: I1125 09:46:53.655044 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w82wc"
Nov 25 09:46:53 crc kubenswrapper[4769]: I1125 09:46:53.678370 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w82wc"
Nov 25 09:46:53 crc kubenswrapper[4769]: I1125 09:46:53.724221 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-rdchw"
Nov 25 09:46:53 crc kubenswrapper[4769]: I1125 09:46:53.744829 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d7jmg"
Nov 25 09:46:53 crc kubenswrapper[4769]: I1125 09:46:53.940940 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-zs9d4"
Nov 25 09:46:53 crc kubenswrapper[4769]: I1125 09:46:53.953690 4769 patch_prober.go:28] interesting pod/router-default-5444994796-zs9d4 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 25 09:46:53 crc kubenswrapper[4769]: [-]has-synced failed: reason withheld
Nov 25 09:46:53 crc kubenswrapper[4769]: [+]process-running ok
Nov 25 09:46:53 crc kubenswrapper[4769]: healthz check failed
Nov 25 09:46:53 crc kubenswrapper[4769]: I1125 09:46:53.953800 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zs9d4" podUID="bfcd91d2-c8b0-4648-bcc5-207b11a54ece" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 09:46:54 crc kubenswrapper[4769]: I1125 09:46:54.074659 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ccfz8"]
Nov 25 09:46:54 crc kubenswrapper[4769]: I1125 09:46:54.105428 4769 generic.go:334] "Generic (PLEG): container finished" podID="51cc9569-8a6c-473c-a6c7-628c4c7e1aed" containerID="04c68de936e4d9c785414d36434ca33cc59e0eb0f94876af94f7f401c44009b5" exitCode=0
Nov 25 09:46:54 crc kubenswrapper[4769]: I1125 09:46:54.105565 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gfmk8" event={"ID":"51cc9569-8a6c-473c-a6c7-628c4c7e1aed","Type":"ContainerDied","Data":"04c68de936e4d9c785414d36434ca33cc59e0eb0f94876af94f7f401c44009b5"}
Nov 25 09:46:54 crc kubenswrapper[4769]: I1125 09:46:54.105611 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gfmk8" event={"ID":"51cc9569-8a6c-473c-a6c7-628c4c7e1aed","Type":"ContainerStarted","Data":"f8442b261fa35fc207e0b706f146b2935cf1870321c925767662322f45df0cc0"}
Nov 25 09:46:54 crc kubenswrapper[4769]: I1125 09:46:54.134672 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" event={"ID":"fb4b7e91-71e7-4719-b0c5-35d132cf6115","Type":"ContainerStarted","Data":"817d608979207d4a2be3456a1413c862b9719c56296bb1aadf07c913375c6e10"}
Nov 25 09:46:54 crc kubenswrapper[4769]: I1125 09:46:54.134857 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w"
Nov 25 09:46:54 crc kubenswrapper[4769]: I1125 09:46:54.137296 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"9ccae56f-65fc-4548-a179-e4fbd7b24716","Type":"ContainerStarted","Data":"b4194706e716ad9b5aea28988469698b8f757fd1cf7842cff0c1ee9c37b6044f"}
Nov 25 09:46:54 crc kubenswrapper[4769]: I1125 09:46:54.164901 4769 generic.go:334] "Generic (PLEG): container finished" podID="a51756e2-325b-4979-9e50-2a5cf4ca302f" containerID="ef10e7a9ea34ddb131a14d6d99702e27b8f0476d5d0bf6a2e2057c5444dc7ee7" exitCode=0
Nov 25 09:46:54 crc kubenswrapper[4769]: I1125 09:46:54.167094 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l662h" event={"ID":"a51756e2-325b-4979-9e50-2a5cf4ca302f","Type":"ContainerDied","Data":"ef10e7a9ea34ddb131a14d6d99702e27b8f0476d5d0bf6a2e2057c5444dc7ee7"}
Nov 25 09:46:54 crc kubenswrapper[4769]: I1125 09:46:54.174398 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w82wc"
Nov 25 09:46:54 crc kubenswrapper[4769]: I1125 09:46:54.196749 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" podStartSLOduration=137.196726862 podStartE2EDuration="2m17.196726862s" podCreationTimestamp="2025-11-25 09:44:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:54.161138918 +0000 UTC m=+162.746111241" watchObservedRunningTime="2025-11-25 09:46:54.196726862 +0000 UTC m=+162.781699165"
Nov 25 09:46:54 crc kubenswrapper[4769]: I1125 09:46:54.250863 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes"
Nov 25 09:46:54 crc kubenswrapper[4769]: I1125 09:46:54.651254 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-bbzl9"
Nov 25 09:46:54 crc kubenswrapper[4769]: I1125 09:46:54.704933 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/21dad565-8414-4796-b777-e63f27b9c666-config-volume\") pod \"21dad565-8414-4796-b777-e63f27b9c666\" (UID: \"21dad565-8414-4796-b777-e63f27b9c666\") "
Nov 25 09:46:54 crc kubenswrapper[4769]: I1125 09:46:54.705017 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/21dad565-8414-4796-b777-e63f27b9c666-secret-volume\") pod \"21dad565-8414-4796-b777-e63f27b9c666\" (UID: \"21dad565-8414-4796-b777-e63f27b9c666\") "
Nov 25 09:46:54 crc kubenswrapper[4769]: I1125 09:46:54.705107 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5f4j9\" (UniqueName: \"kubernetes.io/projected/21dad565-8414-4796-b777-e63f27b9c666-kube-api-access-5f4j9\") pod \"21dad565-8414-4796-b777-e63f27b9c666\" (UID: \"21dad565-8414-4796-b777-e63f27b9c666\") "
Nov 25 09:46:54 crc kubenswrapper[4769]: I1125 09:46:54.706408 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/21dad565-8414-4796-b777-e63f27b9c666-config-volume" (OuterVolumeSpecName: "config-volume") pod "21dad565-8414-4796-b777-e63f27b9c666" (UID: "21dad565-8414-4796-b777-e63f27b9c666"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:46:54 crc kubenswrapper[4769]: I1125 09:46:54.718714 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/21dad565-8414-4796-b777-e63f27b9c666-kube-api-access-5f4j9" (OuterVolumeSpecName: "kube-api-access-5f4j9") pod "21dad565-8414-4796-b777-e63f27b9c666" (UID: "21dad565-8414-4796-b777-e63f27b9c666"). InnerVolumeSpecName "kube-api-access-5f4j9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:46:54 crc kubenswrapper[4769]: I1125 09:46:54.723378 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/21dad565-8414-4796-b777-e63f27b9c666-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "21dad565-8414-4796-b777-e63f27b9c666" (UID: "21dad565-8414-4796-b777-e63f27b9c666"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:46:54 crc kubenswrapper[4769]: I1125 09:46:54.809160 4769 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/21dad565-8414-4796-b777-e63f27b9c666-config-volume\") on node \"crc\" DevicePath \"\""
Nov 25 09:46:54 crc kubenswrapper[4769]: I1125 09:46:54.809199 4769 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/21dad565-8414-4796-b777-e63f27b9c666-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 25 09:46:54 crc kubenswrapper[4769]: I1125 09:46:54.809209 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5f4j9\" (UniqueName: \"kubernetes.io/projected/21dad565-8414-4796-b777-e63f27b9c666-kube-api-access-5f4j9\") on node \"crc\" DevicePath \"\""
Nov 25 09:46:54 crc kubenswrapper[4769]: I1125 09:46:54.945282 4769 patch_prober.go:28] interesting pod/router-default-5444994796-zs9d4 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 25 09:46:54 crc kubenswrapper[4769]: [-]has-synced failed: reason withheld
Nov 25 09:46:54 crc kubenswrapper[4769]: [+]process-running ok
Nov 25 09:46:54 crc kubenswrapper[4769]: healthz check failed
Nov 25 09:46:54 crc kubenswrapper[4769]: I1125 09:46:54.945327 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zs9d4" podUID="bfcd91d2-c8b0-4648-bcc5-207b11a54ece" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 09:46:55 crc kubenswrapper[4769]: I1125 09:46:55.219383 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" event={"ID":"fb4b7e91-71e7-4719-b0c5-35d132cf6115","Type":"ContainerStarted","Data":"e3629fe431968ff730a9b207287d490fd395ba6df8690abd47dc55dbb279520a"}
Nov 25 09:46:55 crc kubenswrapper[4769]: I1125 09:46:55.248448 4769 generic.go:334] "Generic (PLEG): container finished" podID="71d2d50a-7ac8-42a1-ac73-4709f6e57ada" containerID="bac4506288a76578f1e33c2f151ed1e6969582b7679f6f76937085cf47fd61dd" exitCode=0
Nov 25 09:46:55 crc kubenswrapper[4769]: I1125 09:46:55.249815 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ccfz8" event={"ID":"71d2d50a-7ac8-42a1-ac73-4709f6e57ada","Type":"ContainerDied","Data":"bac4506288a76578f1e33c2f151ed1e6969582b7679f6f76937085cf47fd61dd"}
Nov 25 09:46:55 crc kubenswrapper[4769]: I1125 09:46:55.249877 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ccfz8" event={"ID":"71d2d50a-7ac8-42a1-ac73-4709f6e57ada","Type":"ContainerStarted","Data":"9fbc7a7123eba42b61b221c9153dd1f41f21aacd3f46db7817a31f7c9bb8b343"}
Nov 25 09:46:55 crc kubenswrapper[4769]: I1125 09:46:55.281277 4769 generic.go:334] "Generic (PLEG): container finished" podID="9ccae56f-65fc-4548-a179-e4fbd7b24716" containerID="f449a72f3c58a52640d7c850a39e094dcc6cdf715ed0a3a6e935ae114ef4d6cb" exitCode=0
Nov 25 09:46:55 crc kubenswrapper[4769]: I1125 09:46:55.281419 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"9ccae56f-65fc-4548-a179-e4fbd7b24716","Type":"ContainerDied","Data":"f449a72f3c58a52640d7c850a39e094dcc6cdf715ed0a3a6e935ae114ef4d6cb"}
Nov 25 09:46:55 crc kubenswrapper[4769]: I1125 09:46:55.288142 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-bbzl9"
Nov 25 09:46:55 crc kubenswrapper[4769]: I1125 09:46:55.288775 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-bbzl9" event={"ID":"21dad565-8414-4796-b777-e63f27b9c666","Type":"ContainerDied","Data":"274abf9130bce1e994400be936cc1ff992fc76eb1849dfa56817acb8e9cd930e"}
Nov 25 09:46:55 crc kubenswrapper[4769]: I1125 09:46:55.288826 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="274abf9130bce1e994400be936cc1ff992fc76eb1849dfa56817acb8e9cd930e"
Nov 25 09:46:55 crc kubenswrapper[4769]: I1125 09:46:55.946532 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-zs9d4"
Nov 25 09:46:55 crc kubenswrapper[4769]: I1125 09:46:55.950986 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-zs9d4"
Nov 25 09:46:56 crc kubenswrapper[4769]: I1125 09:46:56.804827 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 09:46:56 crc kubenswrapper[4769]: I1125 09:46:56.883551 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9ccae56f-65fc-4548-a179-e4fbd7b24716-kube-api-access\") pod \"9ccae56f-65fc-4548-a179-e4fbd7b24716\" (UID: \"9ccae56f-65fc-4548-a179-e4fbd7b24716\") "
Nov 25 09:46:56 crc kubenswrapper[4769]: I1125 09:46:56.883656 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/9ccae56f-65fc-4548-a179-e4fbd7b24716-kubelet-dir\") pod \"9ccae56f-65fc-4548-a179-e4fbd7b24716\" (UID: \"9ccae56f-65fc-4548-a179-e4fbd7b24716\") "
Nov 25 09:46:56 crc kubenswrapper[4769]: I1125 09:46:56.884156 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9ccae56f-65fc-4548-a179-e4fbd7b24716-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "9ccae56f-65fc-4548-a179-e4fbd7b24716" (UID: "9ccae56f-65fc-4548-a179-e4fbd7b24716"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 09:46:56 crc kubenswrapper[4769]: I1125 09:46:56.919534 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9ccae56f-65fc-4548-a179-e4fbd7b24716-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "9ccae56f-65fc-4548-a179-e4fbd7b24716" (UID: "9ccae56f-65fc-4548-a179-e4fbd7b24716"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:46:56 crc kubenswrapper[4769]: I1125 09:46:56.985211 4769 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/9ccae56f-65fc-4548-a179-e4fbd7b24716-kubelet-dir\") on node \"crc\" DevicePath \"\""
Nov 25 09:46:56 crc kubenswrapper[4769]: I1125 09:46:56.985268 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9ccae56f-65fc-4548-a179-e4fbd7b24716-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 25 09:46:57 crc kubenswrapper[4769]: I1125 09:46:57.303153 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Nov 25 09:46:57 crc kubenswrapper[4769]: E1125 09:46:57.303438 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ccae56f-65fc-4548-a179-e4fbd7b24716" containerName="pruner"
Nov 25 09:46:57 crc kubenswrapper[4769]: I1125 09:46:57.303450 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ccae56f-65fc-4548-a179-e4fbd7b24716" containerName="pruner"
Nov 25 09:46:57 crc kubenswrapper[4769]: E1125 09:46:57.303477 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="21dad565-8414-4796-b777-e63f27b9c666" containerName="collect-profiles"
Nov 25 09:46:57 crc kubenswrapper[4769]: I1125 09:46:57.303485 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="21dad565-8414-4796-b777-e63f27b9c666" containerName="collect-profiles"
Nov 25 09:46:57 crc kubenswrapper[4769]: I1125 09:46:57.303595 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ccae56f-65fc-4548-a179-e4fbd7b24716" containerName="pruner"
Nov 25 09:46:57 crc kubenswrapper[4769]: I1125 09:46:57.303608 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="21dad565-8414-4796-b777-e63f27b9c666" containerName="collect-profiles"
Nov 25 09:46:57 crc kubenswrapper[4769]: I1125 09:46:57.304092 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 25 09:46:57 crc kubenswrapper[4769]: I1125 09:46:57.306188 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n"
Nov 25 09:46:57 crc kubenswrapper[4769]: I1125 09:46:57.306918 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt"
Nov 25 09:46:57 crc kubenswrapper[4769]: I1125 09:46:57.321076 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 09:46:57 crc kubenswrapper[4769]: I1125 09:46:57.354606 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"9ccae56f-65fc-4548-a179-e4fbd7b24716","Type":"ContainerDied","Data":"b4194706e716ad9b5aea28988469698b8f757fd1cf7842cff0c1ee9c37b6044f"} Nov 25 09:46:57 crc kubenswrapper[4769]: I1125 09:46:57.362535 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b4194706e716ad9b5aea28988469698b8f757fd1cf7842cff0c1ee9c37b6044f" Nov 25 09:46:57 crc kubenswrapper[4769]: I1125 09:46:57.393806 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/64dbbafa-5fc1-4500-9b49-9d344bf6b356-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"64dbbafa-5fc1-4500-9b49-9d344bf6b356\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 09:46:57 crc kubenswrapper[4769]: I1125 09:46:57.393853 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/64dbbafa-5fc1-4500-9b49-9d344bf6b356-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"64dbbafa-5fc1-4500-9b49-9d344bf6b356\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 09:46:57 crc kubenswrapper[4769]: I1125 09:46:57.496179 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/64dbbafa-5fc1-4500-9b49-9d344bf6b356-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"64dbbafa-5fc1-4500-9b49-9d344bf6b356\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 09:46:57 crc kubenswrapper[4769]: I1125 09:46:57.496356 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/64dbbafa-5fc1-4500-9b49-9d344bf6b356-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"64dbbafa-5fc1-4500-9b49-9d344bf6b356\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 09:46:57 crc kubenswrapper[4769]: I1125 09:46:57.496554 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/64dbbafa-5fc1-4500-9b49-9d344bf6b356-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"64dbbafa-5fc1-4500-9b49-9d344bf6b356\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 09:46:57 crc kubenswrapper[4769]: I1125 09:46:57.529750 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/64dbbafa-5fc1-4500-9b49-9d344bf6b356-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"64dbbafa-5fc1-4500-9b49-9d344bf6b356\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 09:46:57 crc kubenswrapper[4769]: I1125 09:46:57.637294 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 09:46:58 crc kubenswrapper[4769]: I1125 09:46:58.062541 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 25 09:46:58 crc kubenswrapper[4769]: W1125 09:46:58.145149 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod64dbbafa_5fc1_4500_9b49_9d344bf6b356.slice/crio-463b720a34147b6d8ac567c1de7dbcb61a499343e42253d04d3a9dadf141b645 WatchSource:0}: Error finding container 463b720a34147b6d8ac567c1de7dbcb61a499343e42253d04d3a9dadf141b645: Status 404 returned error can't find the container with id 463b720a34147b6d8ac567c1de7dbcb61a499343e42253d04d3a9dadf141b645 Nov 25 09:46:58 crc kubenswrapper[4769]: I1125 09:46:58.433911 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"64dbbafa-5fc1-4500-9b49-9d344bf6b356","Type":"ContainerStarted","Data":"463b720a34147b6d8ac567c1de7dbcb61a499343e42253d04d3a9dadf141b645"} Nov 25 09:46:59 crc kubenswrapper[4769]: I1125 09:46:59.429895 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-hn7p6" Nov 25 09:46:59 crc kubenswrapper[4769]: I1125 09:46:59.452322 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"64dbbafa-5fc1-4500-9b49-9d344bf6b356","Type":"ContainerStarted","Data":"1dd383272f6ddf412e99114a3ede79b3bab1c6d75f12255fdf7d1762f34f87d6"} Nov 25 09:46:59 crc kubenswrapper[4769]: I1125 09:46:59.483829 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=2.4838059169999998 podStartE2EDuration="2.483805917s" podCreationTimestamp="2025-11-25 09:46:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:46:59.478280365 +0000 UTC m=+168.063252678" watchObservedRunningTime="2025-11-25 09:46:59.483805917 +0000 UTC m=+168.068778230" Nov 25 09:47:00 crc kubenswrapper[4769]: I1125 09:47:00.148687 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/76770e00-0d61-45ae-9772-1e8c42dc6ea6-metrics-certs\") pod \"network-metrics-daemon-7khh9\" (UID: \"76770e00-0d61-45ae-9772-1e8c42dc6ea6\") " pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:47:00 crc kubenswrapper[4769]: I1125 09:47:00.155884 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/76770e00-0d61-45ae-9772-1e8c42dc6ea6-metrics-certs\") pod \"network-metrics-daemon-7khh9\" (UID: \"76770e00-0d61-45ae-9772-1e8c42dc6ea6\") " pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:47:00 crc kubenswrapper[4769]: I1125 09:47:00.286332 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-7khh9" Nov 25 09:47:00 crc kubenswrapper[4769]: I1125 09:47:00.464080 4769 generic.go:334] "Generic (PLEG): container finished" podID="64dbbafa-5fc1-4500-9b49-9d344bf6b356" containerID="1dd383272f6ddf412e99114a3ede79b3bab1c6d75f12255fdf7d1762f34f87d6" exitCode=0 Nov 25 09:47:00 crc kubenswrapper[4769]: I1125 09:47:00.464220 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"64dbbafa-5fc1-4500-9b49-9d344bf6b356","Type":"ContainerDied","Data":"1dd383272f6ddf412e99114a3ede79b3bab1c6d75f12255fdf7d1762f34f87d6"} Nov 25 09:47:00 crc kubenswrapper[4769]: I1125 09:47:00.741934 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-7khh9"] Nov 25 09:47:00 crc kubenswrapper[4769]: W1125 09:47:00.794844 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod76770e00_0d61_45ae_9772_1e8c42dc6ea6.slice/crio-0ef99bc663a94ecc8a4aea292e54aa3ffb0cdf03be67ffe0d992068fa8de8649 WatchSource:0}: Error finding container 0ef99bc663a94ecc8a4aea292e54aa3ffb0cdf03be67ffe0d992068fa8de8649: Status 404 returned error can't find the container with id 0ef99bc663a94ecc8a4aea292e54aa3ffb0cdf03be67ffe0d992068fa8de8649 Nov 25 09:47:01 crc kubenswrapper[4769]: I1125 09:47:01.478858 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-7khh9" event={"ID":"76770e00-0d61-45ae-9772-1e8c42dc6ea6","Type":"ContainerStarted","Data":"0ef99bc663a94ecc8a4aea292e54aa3ffb0cdf03be67ffe0d992068fa8de8649"} Nov 25 09:47:01 crc kubenswrapper[4769]: I1125 09:47:01.805508 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 09:47:01 crc kubenswrapper[4769]: I1125 09:47:01.971701 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/64dbbafa-5fc1-4500-9b49-9d344bf6b356-kube-api-access\") pod \"64dbbafa-5fc1-4500-9b49-9d344bf6b356\" (UID: \"64dbbafa-5fc1-4500-9b49-9d344bf6b356\") " Nov 25 09:47:01 crc kubenswrapper[4769]: I1125 09:47:01.971786 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/64dbbafa-5fc1-4500-9b49-9d344bf6b356-kubelet-dir\") pod \"64dbbafa-5fc1-4500-9b49-9d344bf6b356\" (UID: \"64dbbafa-5fc1-4500-9b49-9d344bf6b356\") " Nov 25 09:47:01 crc kubenswrapper[4769]: I1125 09:47:01.971941 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/64dbbafa-5fc1-4500-9b49-9d344bf6b356-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "64dbbafa-5fc1-4500-9b49-9d344bf6b356" (UID: "64dbbafa-5fc1-4500-9b49-9d344bf6b356"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:47:01 crc kubenswrapper[4769]: I1125 09:47:01.972378 4769 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/64dbbafa-5fc1-4500-9b49-9d344bf6b356-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:01 crc kubenswrapper[4769]: I1125 09:47:01.994342 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/64dbbafa-5fc1-4500-9b49-9d344bf6b356-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "64dbbafa-5fc1-4500-9b49-9d344bf6b356" (UID: "64dbbafa-5fc1-4500-9b49-9d344bf6b356"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:47:02 crc kubenswrapper[4769]: I1125 09:47:02.073611 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/64dbbafa-5fc1-4500-9b49-9d344bf6b356-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:02 crc kubenswrapper[4769]: I1125 09:47:02.498066 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-7khh9" event={"ID":"76770e00-0d61-45ae-9772-1e8c42dc6ea6","Type":"ContainerStarted","Data":"9abc5c321904e029c153bbe3ea851dfe2fd8cfd416e7a321e02f4bf83f1cf763"} Nov 25 09:47:02 crc kubenswrapper[4769]: I1125 09:47:02.505832 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"64dbbafa-5fc1-4500-9b49-9d344bf6b356","Type":"ContainerDied","Data":"463b720a34147b6d8ac567c1de7dbcb61a499343e42253d04d3a9dadf141b645"} Nov 25 09:47:02 crc kubenswrapper[4769]: I1125 09:47:02.505879 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="463b720a34147b6d8ac567c1de7dbcb61a499343e42253d04d3a9dadf141b645" Nov 25 09:47:02 crc kubenswrapper[4769]: I1125 09:47:02.505950 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 09:47:03 crc kubenswrapper[4769]: I1125 09:47:03.397742 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-ghgwj" Nov 25 09:47:03 crc kubenswrapper[4769]: I1125 09:47:03.406039 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-ghgwj" Nov 25 09:47:03 crc kubenswrapper[4769]: I1125 09:47:03.473106 4769 patch_prober.go:28] interesting pod/downloads-7954f5f757-l2kg2 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body= Nov 25 09:47:03 crc kubenswrapper[4769]: I1125 09:47:03.473174 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-l2kg2" podUID="01b73250-0ba8-4c2d-9ada-dacc89a70a7d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" Nov 25 09:47:03 crc kubenswrapper[4769]: I1125 09:47:03.474032 4769 patch_prober.go:28] interesting pod/downloads-7954f5f757-l2kg2 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body= Nov 25 09:47:03 crc kubenswrapper[4769]: I1125 09:47:03.474133 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-l2kg2" podUID="01b73250-0ba8-4c2d-9ada-dacc89a70a7d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" Nov 25 09:47:09 crc kubenswrapper[4769]: I1125 09:47:09.370776 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 09:47:13 crc kubenswrapper[4769]: I1125 09:47:13.257892 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:47:13 crc kubenswrapper[4769]: I1125 09:47:13.478312 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-l2kg2" Nov 25 09:47:22 crc kubenswrapper[4769]: I1125 09:47:22.291044 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:47:22 crc kubenswrapper[4769]: I1125 09:47:22.291790 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:47:24 crc kubenswrapper[4769]: I1125 09:47:24.004779 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-w55h7" Nov 25 09:47:28 crc kubenswrapper[4769]: E1125 09:47:28.226469 4769 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = 
copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 25 09:47:28 crc kubenswrapper[4769]: E1125 09:47:28.227624 4769 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-2v8d6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-ccfz8_openshift-marketplace(71d2d50a-7ac8-42a1-ac73-4709f6e57ada): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 09:47:28 crc kubenswrapper[4769]: E1125 09:47:28.231351 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-ccfz8" podUID="71d2d50a-7ac8-42a1-ac73-4709f6e57ada" Nov 25 09:47:29 crc kubenswrapper[4769]: E1125 09:47:29.526940 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-ccfz8" podUID="71d2d50a-7ac8-42a1-ac73-4709f6e57ada" Nov 25 09:47:30 crc kubenswrapper[4769]: E1125 09:47:30.662172 4769 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 25 09:47:30 crc kubenswrapper[4769]: E1125 09:47:30.662783 4769 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-tbhv6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-7kqx5_openshift-marketplace(ff08cdaf-0d49-4f4d-bc40-977a970ef1a5): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 09:47:30 crc kubenswrapper[4769]: E1125 09:47:30.664031 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-7kqx5" podUID="ff08cdaf-0d49-4f4d-bc40-977a970ef1a5" Nov 25 09:47:30 crc kubenswrapper[4769]: E1125 09:47:30.698432 4769 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 25 09:47:30 crc kubenswrapper[4769]: E1125 09:47:30.698649 4769 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fs2xl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-gfmk8_openshift-marketplace(51cc9569-8a6c-473c-a6c7-628c4c7e1aed): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 09:47:30 crc kubenswrapper[4769]: E1125 09:47:30.702522 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-gfmk8" podUID="51cc9569-8a6c-473c-a6c7-628c4c7e1aed" Nov 25 09:47:32 crc kubenswrapper[4769]: E1125 09:47:32.090134 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-7kqx5" podUID="ff08cdaf-0d49-4f4d-bc40-977a970ef1a5" Nov 25 09:47:32 crc kubenswrapper[4769]: E1125 09:47:32.090249 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-gfmk8" podUID="51cc9569-8a6c-473c-a6c7-628c4c7e1aed" Nov 25 09:47:32 crc kubenswrapper[4769]: E1125 09:47:32.169156 4769 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 25 09:47:32 crc kubenswrapper[4769]: E1125 09:47:32.169316 4769 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-2f7f5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-qmkgl_openshift-marketplace(1c9095dc-a1a8-4e5c-a357-dae9aaf966e1): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 09:47:32 crc kubenswrapper[4769]: E1125 09:47:32.170785 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-qmkgl" podUID="1c9095dc-a1a8-4e5c-a357-dae9aaf966e1" Nov 25 09:47:32 crc kubenswrapper[4769]: E1125 09:47:32.203479 4769 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 25 09:47:32 crc kubenswrapper[4769]: E1125 09:47:32.203663 4769 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kzflc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-l662h_openshift-marketplace(a51756e2-325b-4979-9e50-2a5cf4ca302f): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 09:47:32 crc kubenswrapper[4769]: E1125 09:47:32.204851 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-l662h" podUID="a51756e2-325b-4979-9e50-2a5cf4ca302f" Nov 25 09:47:32 crc kubenswrapper[4769]: E1125 09:47:32.226087 4769 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 25 09:47:32 crc kubenswrapper[4769]: E1125 09:47:32.226523 4769 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qtmmr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-c4dbm_openshift-marketplace(b8cf525e-b1f8-447d-bde3-7a8746836e39): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 09:47:32 crc kubenswrapper[4769]: E1125 09:47:32.227706 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-c4dbm" podUID="b8cf525e-b1f8-447d-bde3-7a8746836e39" Nov 25 09:47:32 crc kubenswrapper[4769]: I1125 09:47:32.695173 4769 generic.go:334] "Generic (PLEG): container finished" podID="1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6" containerID="71226d5395abb437d9f1eda4b28ddd7f32fee4a2016ea0f697b5023e4122d7ec" exitCode=0 Nov 25 09:47:32 crc kubenswrapper[4769]: I1125 09:47:32.695279 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7m4kh" event={"ID":"1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6","Type":"ContainerDied","Data":"71226d5395abb437d9f1eda4b28ddd7f32fee4a2016ea0f697b5023e4122d7ec"} Nov 25 09:47:32 crc kubenswrapper[4769]: I1125 09:47:32.698269 4769 generic.go:334] "Generic (PLEG): container finished" podID="f1f73c3a-3ade-470b-9e77-000116d1b631" containerID="268f59764f0b71a75a97939a6b129dbc71b642a24702917452b88c08cba7f51e" exitCode=0 Nov 25 09:47:32 crc kubenswrapper[4769]: I1125 09:47:32.699585 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x7wr9" event={"ID":"f1f73c3a-3ade-470b-9e77-000116d1b631","Type":"ContainerDied","Data":"268f59764f0b71a75a97939a6b129dbc71b642a24702917452b88c08cba7f51e"} Nov 25 09:47:32 crc kubenswrapper[4769]: I1125 09:47:32.703109 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-7khh9" event={"ID":"76770e00-0d61-45ae-9772-1e8c42dc6ea6","Type":"ContainerStarted","Data":"46ba3442bb3dc56a1e9e50e8c91472a814b9867f7bbca009cf871fbee5977e06"} Nov 25 09:47:32 crc kubenswrapper[4769]: E1125 09:47:32.705873 4769 pod_workers.go:1301] "Error syncing 
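[Editor's note] The ErrImagePull records above are each followed by ImagePullBackOff, i.e. a retry loop with growing delays. A sketch of that shape; the 10s initial delay, doubling, and 5-minute cap match Kubernetes' documented image-pull back-off, but the demo below shrinks them to milliseconds so it finishes quickly:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// pullWithBackoff retries a failing pull with exponentially growing
// delays, the pattern behind the ErrImagePull -> ImagePullBackOff
// records above. Real kubelet delays: 10s initial, x2, capped at 5m.
func pullWithBackoff(pull func() error, maxTries int) error {
	delay := 10 * time.Millisecond // demo scale; kubelet uses seconds
	const cap = 5 * 60 * time.Millisecond
	for i := 0; i < maxTries; i++ {
		err := pull()
		if err == nil {
			return nil
		}
		fmt.Printf("ErrImagePull: %v; back-off %s before retrying\n", err, delay)
		time.Sleep(delay)
		if delay *= 2; delay > cap {
			delay = cap
		}
	}
	return errors.New("ImagePullBackOff: retries exhausted")
}

func main() {
	attempts := 0
	err := pullWithBackoff(func() error {
		attempts++
		if attempts < 3 { // first two pulls fail, like the canceled copies above
			return errors.New("copying config: context canceled")
		}
		return nil
	}, 5)
	fmt.Println("result:", err)
}
```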
pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-l662h" podUID="a51756e2-325b-4979-9e50-2a5cf4ca302f" Nov 25 09:47:32 crc kubenswrapper[4769]: E1125 09:47:32.710224 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-c4dbm" podUID="b8cf525e-b1f8-447d-bde3-7a8746836e39" Nov 25 09:47:32 crc kubenswrapper[4769]: E1125 09:47:32.710399 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-qmkgl" podUID="1c9095dc-a1a8-4e5c-a357-dae9aaf966e1" Nov 25 09:47:32 crc kubenswrapper[4769]: I1125 09:47:32.758680 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-7khh9" podStartSLOduration=175.758659498 podStartE2EDuration="2m55.758659498s" podCreationTimestamp="2025-11-25 09:44:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:47:32.736484379 +0000 UTC m=+201.321456712" watchObservedRunningTime="2025-11-25 09:47:32.758659498 +0000 UTC m=+201.343631811" Nov 25 09:47:33 crc kubenswrapper[4769]: I1125 09:47:33.709859 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x7wr9" event={"ID":"f1f73c3a-3ade-470b-9e77-000116d1b631","Type":"ContainerStarted","Data":"93e3da2645097a95f46218978ccc577febbfd04272efec9001ccca90fdc3c168"} Nov 25 09:47:33 crc kubenswrapper[4769]: I1125 09:47:33.713560 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7m4kh" event={"ID":"1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6","Type":"ContainerStarted","Data":"00ffee69e6cd4dd3ef63b23b942dd7908bdb19a5d709fe8f3361759d445d04a6"} Nov 25 09:47:33 crc kubenswrapper[4769]: I1125 09:47:33.735388 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-x7wr9" podStartSLOduration=4.507869063 podStartE2EDuration="44.735077893s" podCreationTimestamp="2025-11-25 09:46:49 +0000 UTC" firstStartedPulling="2025-11-25 09:46:52.957469306 +0000 UTC m=+161.542441619" lastFinishedPulling="2025-11-25 09:47:33.184678136 +0000 UTC m=+201.769650449" observedRunningTime="2025-11-25 09:47:33.731994658 +0000 UTC m=+202.316966991" watchObservedRunningTime="2025-11-25 09:47:33.735077893 +0000 UTC m=+202.320050206" Nov 25 09:47:33 crc kubenswrapper[4769]: I1125 09:47:33.759705 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-7m4kh" podStartSLOduration=3.430639959 podStartE2EDuration="44.759687864s" podCreationTimestamp="2025-11-25 09:46:49 +0000 UTC" firstStartedPulling="2025-11-25 09:46:51.899140501 +0000 UTC m=+160.484112814" lastFinishedPulling="2025-11-25 09:47:33.228188366 +0000 UTC m=+201.813160719" observedRunningTime="2025-11-25 09:47:33.757370275 +0000 UTC m=+202.342342588" watchObservedRunningTime="2025-11-25 09:47:33.759687864 +0000 UTC 
m=+202.344660177" Nov 25 09:47:39 crc kubenswrapper[4769]: I1125 09:47:39.884155 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-7m4kh" Nov 25 09:47:39 crc kubenswrapper[4769]: I1125 09:47:39.885988 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-7m4kh" Nov 25 09:47:40 crc kubenswrapper[4769]: I1125 09:47:40.034920 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-7m4kh" Nov 25 09:47:40 crc kubenswrapper[4769]: I1125 09:47:40.380012 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-x7wr9" Nov 25 09:47:40 crc kubenswrapper[4769]: I1125 09:47:40.380081 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-x7wr9" Nov 25 09:47:40 crc kubenswrapper[4769]: I1125 09:47:40.422002 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-x7wr9" Nov 25 09:47:40 crc kubenswrapper[4769]: I1125 09:47:40.797091 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-x7wr9" Nov 25 09:47:40 crc kubenswrapper[4769]: I1125 09:47:40.797168 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-7m4kh" Nov 25 09:47:41 crc kubenswrapper[4769]: I1125 09:47:41.267907 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-x7wr9"] Nov 25 09:47:42 crc kubenswrapper[4769]: I1125 09:47:42.762453 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-x7wr9" podUID="f1f73c3a-3ade-470b-9e77-000116d1b631" containerName="registry-server" containerID="cri-o://93e3da2645097a95f46218978ccc577febbfd04272efec9001ccca90fdc3c168" gracePeriod=2 Nov 25 09:47:43 crc kubenswrapper[4769]: I1125 09:47:43.109788 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-x7wr9" Nov 25 09:47:43 crc kubenswrapper[4769]: I1125 09:47:43.299925 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f1f73c3a-3ade-470b-9e77-000116d1b631-utilities\") pod \"f1f73c3a-3ade-470b-9e77-000116d1b631\" (UID: \"f1f73c3a-3ade-470b-9e77-000116d1b631\") " Nov 25 09:47:43 crc kubenswrapper[4769]: I1125 09:47:43.300056 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6v9s8\" (UniqueName: \"kubernetes.io/projected/f1f73c3a-3ade-470b-9e77-000116d1b631-kube-api-access-6v9s8\") pod \"f1f73c3a-3ade-470b-9e77-000116d1b631\" (UID: \"f1f73c3a-3ade-470b-9e77-000116d1b631\") " Nov 25 09:47:43 crc kubenswrapper[4769]: I1125 09:47:43.300129 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f1f73c3a-3ade-470b-9e77-000116d1b631-catalog-content\") pod \"f1f73c3a-3ade-470b-9e77-000116d1b631\" (UID: \"f1f73c3a-3ade-470b-9e77-000116d1b631\") " Nov 25 09:47:43 crc kubenswrapper[4769]: I1125 09:47:43.300813 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f1f73c3a-3ade-470b-9e77-000116d1b631-utilities" (OuterVolumeSpecName: "utilities") pod "f1f73c3a-3ade-470b-9e77-000116d1b631" (UID: "f1f73c3a-3ade-470b-9e77-000116d1b631"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:47:43 crc kubenswrapper[4769]: I1125 09:47:43.308235 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f1f73c3a-3ade-470b-9e77-000116d1b631-kube-api-access-6v9s8" (OuterVolumeSpecName: "kube-api-access-6v9s8") pod "f1f73c3a-3ade-470b-9e77-000116d1b631" (UID: "f1f73c3a-3ade-470b-9e77-000116d1b631"). InnerVolumeSpecName "kube-api-access-6v9s8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:47:43 crc kubenswrapper[4769]: I1125 09:47:43.402039 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f1f73c3a-3ade-470b-9e77-000116d1b631-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:43 crc kubenswrapper[4769]: I1125 09:47:43.402104 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6v9s8\" (UniqueName: \"kubernetes.io/projected/f1f73c3a-3ade-470b-9e77-000116d1b631-kube-api-access-6v9s8\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:43 crc kubenswrapper[4769]: I1125 09:47:43.656658 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f1f73c3a-3ade-470b-9e77-000116d1b631-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f1f73c3a-3ade-470b-9e77-000116d1b631" (UID: "f1f73c3a-3ade-470b-9e77-000116d1b631"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:47:43 crc kubenswrapper[4769]: I1125 09:47:43.706707 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f1f73c3a-3ade-470b-9e77-000116d1b631-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:47:43 crc kubenswrapper[4769]: I1125 09:47:43.772197 4769 generic.go:334] "Generic (PLEG): container finished" podID="f1f73c3a-3ade-470b-9e77-000116d1b631" containerID="93e3da2645097a95f46218978ccc577febbfd04272efec9001ccca90fdc3c168" exitCode=0 Nov 25 09:47:43 crc kubenswrapper[4769]: I1125 09:47:43.772267 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x7wr9" event={"ID":"f1f73c3a-3ade-470b-9e77-000116d1b631","Type":"ContainerDied","Data":"93e3da2645097a95f46218978ccc577febbfd04272efec9001ccca90fdc3c168"} Nov 25 09:47:43 crc kubenswrapper[4769]: I1125 09:47:43.772320 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x7wr9" event={"ID":"f1f73c3a-3ade-470b-9e77-000116d1b631","Type":"ContainerDied","Data":"c6cb8737bbf99da5e6f26c307c7404be98d80c43881793a90b4cc3a26df66d24"} Nov 25 09:47:43 crc kubenswrapper[4769]: I1125 09:47:43.772343 4769 scope.go:117] "RemoveContainer" containerID="93e3da2645097a95f46218978ccc577febbfd04272efec9001ccca90fdc3c168" Nov 25 09:47:43 crc kubenswrapper[4769]: I1125 09:47:43.772390 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-x7wr9" Nov 25 09:47:43 crc kubenswrapper[4769]: I1125 09:47:43.810005 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-x7wr9"] Nov 25 09:47:43 crc kubenswrapper[4769]: I1125 09:47:43.813481 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-x7wr9"] Nov 25 09:47:43 crc kubenswrapper[4769]: I1125 09:47:43.817294 4769 scope.go:117] "RemoveContainer" containerID="268f59764f0b71a75a97939a6b129dbc71b642a24702917452b88c08cba7f51e" Nov 25 09:47:43 crc kubenswrapper[4769]: I1125 09:47:43.838739 4769 scope.go:117] "RemoveContainer" containerID="e35295c12828caa0c01012f94aad1e2010b2f54dff2d00656b24b17f84730206" Nov 25 09:47:43 crc kubenswrapper[4769]: I1125 09:47:43.860150 4769 scope.go:117] "RemoveContainer" containerID="93e3da2645097a95f46218978ccc577febbfd04272efec9001ccca90fdc3c168" Nov 25 09:47:43 crc kubenswrapper[4769]: E1125 09:47:43.861045 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"93e3da2645097a95f46218978ccc577febbfd04272efec9001ccca90fdc3c168\": container with ID starting with 93e3da2645097a95f46218978ccc577febbfd04272efec9001ccca90fdc3c168 not found: ID does not exist" containerID="93e3da2645097a95f46218978ccc577febbfd04272efec9001ccca90fdc3c168" Nov 25 09:47:43 crc kubenswrapper[4769]: I1125 09:47:43.861126 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"93e3da2645097a95f46218978ccc577febbfd04272efec9001ccca90fdc3c168"} err="failed to get container status \"93e3da2645097a95f46218978ccc577febbfd04272efec9001ccca90fdc3c168\": rpc error: code = NotFound desc = could not find container \"93e3da2645097a95f46218978ccc577febbfd04272efec9001ccca90fdc3c168\": container with ID starting with 93e3da2645097a95f46218978ccc577febbfd04272efec9001ccca90fdc3c168 not found: ID does not exist" Nov 25 
Nov 25 09:47:43 crc kubenswrapper[4769]: I1125 09:47:43.861214 4769 scope.go:117] "RemoveContainer" containerID="268f59764f0b71a75a97939a6b129dbc71b642a24702917452b88c08cba7f51e"
Nov 25 09:47:43 crc kubenswrapper[4769]: E1125 09:47:43.861825 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"268f59764f0b71a75a97939a6b129dbc71b642a24702917452b88c08cba7f51e\": container with ID starting with 268f59764f0b71a75a97939a6b129dbc71b642a24702917452b88c08cba7f51e not found: ID does not exist" containerID="268f59764f0b71a75a97939a6b129dbc71b642a24702917452b88c08cba7f51e"
Nov 25 09:47:43 crc kubenswrapper[4769]: I1125 09:47:43.861865 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"268f59764f0b71a75a97939a6b129dbc71b642a24702917452b88c08cba7f51e"} err="failed to get container status \"268f59764f0b71a75a97939a6b129dbc71b642a24702917452b88c08cba7f51e\": rpc error: code = NotFound desc = could not find container \"268f59764f0b71a75a97939a6b129dbc71b642a24702917452b88c08cba7f51e\": container with ID starting with 268f59764f0b71a75a97939a6b129dbc71b642a24702917452b88c08cba7f51e not found: ID does not exist"
Nov 25 09:47:43 crc kubenswrapper[4769]: I1125 09:47:43.861895 4769 scope.go:117] "RemoveContainer" containerID="e35295c12828caa0c01012f94aad1e2010b2f54dff2d00656b24b17f84730206"
Nov 25 09:47:43 crc kubenswrapper[4769]: E1125 09:47:43.862312 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e35295c12828caa0c01012f94aad1e2010b2f54dff2d00656b24b17f84730206\": container with ID starting with e35295c12828caa0c01012f94aad1e2010b2f54dff2d00656b24b17f84730206 not found: ID does not exist" containerID="e35295c12828caa0c01012f94aad1e2010b2f54dff2d00656b24b17f84730206"
Nov 25 09:47:43 crc kubenswrapper[4769]: I1125 09:47:43.862335 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e35295c12828caa0c01012f94aad1e2010b2f54dff2d00656b24b17f84730206"} err="failed to get container status \"e35295c12828caa0c01012f94aad1e2010b2f54dff2d00656b24b17f84730206\": rpc error: code = NotFound desc = could not find container \"e35295c12828caa0c01012f94aad1e2010b2f54dff2d00656b24b17f84730206\": container with ID starting with e35295c12828caa0c01012f94aad1e2010b2f54dff2d00656b24b17f84730206 not found: ID does not exist"
Nov 25 09:47:44 crc kubenswrapper[4769]: I1125 09:47:44.250748 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f1f73c3a-3ade-470b-9e77-000116d1b631" path="/var/lib/kubelet/pods/f1f73c3a-3ade-470b-9e77-000116d1b631/volumes"
Nov 25 09:47:46 crc kubenswrapper[4769]: I1125 09:47:46.793418 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ccfz8" event={"ID":"71d2d50a-7ac8-42a1-ac73-4709f6e57ada","Type":"ContainerStarted","Data":"23fdac6686aa4c8837bfde3e9a6478ccad902891888f04b9f1de1c19cadddca2"}
Nov 25 09:47:46 crc kubenswrapper[4769]: I1125 09:47:46.796629 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gfmk8" event={"ID":"51cc9569-8a6c-473c-a6c7-628c4c7e1aed","Type":"ContainerStarted","Data":"2478a6131b4dbc071585e61e36361b315efbfee7214533fb271707184453cde6"}
Nov 25 09:47:47 crc kubenswrapper[4769]: I1125 09:47:47.807364 4769 generic.go:334] "Generic (PLEG): container finished" podID="ff08cdaf-0d49-4f4d-bc40-977a970ef1a5"
containerID="df61445c9976b18a2074fae62a4abe562fd117e480b98e8ff8a7325f97ece260" exitCode=0 Nov 25 09:47:47 crc kubenswrapper[4769]: I1125 09:47:47.807457 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7kqx5" event={"ID":"ff08cdaf-0d49-4f4d-bc40-977a970ef1a5","Type":"ContainerDied","Data":"df61445c9976b18a2074fae62a4abe562fd117e480b98e8ff8a7325f97ece260"} Nov 25 09:47:47 crc kubenswrapper[4769]: I1125 09:47:47.810689 4769 generic.go:334] "Generic (PLEG): container finished" podID="51cc9569-8a6c-473c-a6c7-628c4c7e1aed" containerID="2478a6131b4dbc071585e61e36361b315efbfee7214533fb271707184453cde6" exitCode=0 Nov 25 09:47:47 crc kubenswrapper[4769]: I1125 09:47:47.810757 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gfmk8" event={"ID":"51cc9569-8a6c-473c-a6c7-628c4c7e1aed","Type":"ContainerDied","Data":"2478a6131b4dbc071585e61e36361b315efbfee7214533fb271707184453cde6"} Nov 25 09:47:47 crc kubenswrapper[4769]: I1125 09:47:47.822306 4769 generic.go:334] "Generic (PLEG): container finished" podID="71d2d50a-7ac8-42a1-ac73-4709f6e57ada" containerID="23fdac6686aa4c8837bfde3e9a6478ccad902891888f04b9f1de1c19cadddca2" exitCode=0 Nov 25 09:47:47 crc kubenswrapper[4769]: I1125 09:47:47.822379 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ccfz8" event={"ID":"71d2d50a-7ac8-42a1-ac73-4709f6e57ada","Type":"ContainerDied","Data":"23fdac6686aa4c8837bfde3e9a6478ccad902891888f04b9f1de1c19cadddca2"} Nov 25 09:47:48 crc kubenswrapper[4769]: I1125 09:47:48.832906 4769 generic.go:334] "Generic (PLEG): container finished" podID="b8cf525e-b1f8-447d-bde3-7a8746836e39" containerID="88cbc615d5e7d0867efd0f9206563c40847cafdfdf13d22f9e81d09efc3154f7" exitCode=0 Nov 25 09:47:48 crc kubenswrapper[4769]: I1125 09:47:48.832980 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c4dbm" event={"ID":"b8cf525e-b1f8-447d-bde3-7a8746836e39","Type":"ContainerDied","Data":"88cbc615d5e7d0867efd0f9206563c40847cafdfdf13d22f9e81d09efc3154f7"} Nov 25 09:47:48 crc kubenswrapper[4769]: I1125 09:47:48.837687 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ccfz8" event={"ID":"71d2d50a-7ac8-42a1-ac73-4709f6e57ada","Type":"ContainerStarted","Data":"0e729677665236758bc04cc6a2180873ffe1c0ed7771a27c96cfdf63d6156afd"} Nov 25 09:47:48 crc kubenswrapper[4769]: I1125 09:47:48.840524 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7kqx5" event={"ID":"ff08cdaf-0d49-4f4d-bc40-977a970ef1a5","Type":"ContainerStarted","Data":"e60ca5c15e6f4f364e711a1439220217cfb50c172de86c71e130e03ad1edc5d6"} Nov 25 09:47:48 crc kubenswrapper[4769]: I1125 09:47:48.843555 4769 generic.go:334] "Generic (PLEG): container finished" podID="1c9095dc-a1a8-4e5c-a357-dae9aaf966e1" containerID="d1c9002ea306b70de40a42a24bcdd04b415f976ccfd3ae21589995a7f8189314" exitCode=0 Nov 25 09:47:48 crc kubenswrapper[4769]: I1125 09:47:48.843838 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qmkgl" event={"ID":"1c9095dc-a1a8-4e5c-a357-dae9aaf966e1","Type":"ContainerDied","Data":"d1c9002ea306b70de40a42a24bcdd04b415f976ccfd3ae21589995a7f8189314"} Nov 25 09:47:48 crc kubenswrapper[4769]: I1125 09:47:48.866840 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gfmk8" 
event={"ID":"51cc9569-8a6c-473c-a6c7-628c4c7e1aed","Type":"ContainerStarted","Data":"0fc225b6eb3ce8c6318681fc3e39185462b760f526c4c60711cb422454666784"} Nov 25 09:47:48 crc kubenswrapper[4769]: I1125 09:47:48.916415 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-7kqx5" podStartSLOduration=2.635632028 podStartE2EDuration="57.916390081s" podCreationTimestamp="2025-11-25 09:46:51 +0000 UTC" firstStartedPulling="2025-11-25 09:46:53.040096135 +0000 UTC m=+161.625068448" lastFinishedPulling="2025-11-25 09:47:48.320854188 +0000 UTC m=+216.905826501" observedRunningTime="2025-11-25 09:47:48.912927688 +0000 UTC m=+217.497900011" watchObservedRunningTime="2025-11-25 09:47:48.916390081 +0000 UTC m=+217.501362394" Nov 25 09:47:48 crc kubenswrapper[4769]: I1125 09:47:48.936629 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-ccfz8" podStartSLOduration=3.775543574 podStartE2EDuration="56.936603618s" podCreationTimestamp="2025-11-25 09:46:52 +0000 UTC" firstStartedPulling="2025-11-25 09:46:55.276458501 +0000 UTC m=+163.861430814" lastFinishedPulling="2025-11-25 09:47:48.437518545 +0000 UTC m=+217.022490858" observedRunningTime="2025-11-25 09:47:48.934093025 +0000 UTC m=+217.519065348" watchObservedRunningTime="2025-11-25 09:47:48.936603618 +0000 UTC m=+217.521575931" Nov 25 09:47:48 crc kubenswrapper[4769]: I1125 09:47:48.964263 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-gfmk8" podStartSLOduration=2.8297377729999997 podStartE2EDuration="56.964235723s" podCreationTimestamp="2025-11-25 09:46:52 +0000 UTC" firstStartedPulling="2025-11-25 09:46:54.108197075 +0000 UTC m=+162.693169388" lastFinishedPulling="2025-11-25 09:47:48.242695025 +0000 UTC m=+216.827667338" observedRunningTime="2025-11-25 09:47:48.9574817 +0000 UTC m=+217.542454013" watchObservedRunningTime="2025-11-25 09:47:48.964235723 +0000 UTC m=+217.549208036" Nov 25 09:47:49 crc kubenswrapper[4769]: I1125 09:47:49.877250 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qmkgl" event={"ID":"1c9095dc-a1a8-4e5c-a357-dae9aaf966e1","Type":"ContainerStarted","Data":"69c5ee84bfdad6b4231999d06e92cc2130a7bf8f6f710c4b486e55b9ecea8778"} Nov 25 09:47:49 crc kubenswrapper[4769]: I1125 09:47:49.880661 4769 generic.go:334] "Generic (PLEG): container finished" podID="a51756e2-325b-4979-9e50-2a5cf4ca302f" containerID="e5b2bdff3d629d95f74991c3dd951dd5ab656e9d7c5baf1cb25db2cce09c4e98" exitCode=0 Nov 25 09:47:49 crc kubenswrapper[4769]: I1125 09:47:49.880748 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l662h" event={"ID":"a51756e2-325b-4979-9e50-2a5cf4ca302f","Type":"ContainerDied","Data":"e5b2bdff3d629d95f74991c3dd951dd5ab656e9d7c5baf1cb25db2cce09c4e98"} Nov 25 09:47:49 crc kubenswrapper[4769]: I1125 09:47:49.888556 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c4dbm" event={"ID":"b8cf525e-b1f8-447d-bde3-7a8746836e39","Type":"ContainerStarted","Data":"36e244230874911c54b925004fc09cc221d6adfe7e092bb37a5380d83090d852"} Nov 25 09:47:49 crc kubenswrapper[4769]: I1125 09:47:49.913476 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-qmkgl" podStartSLOduration=3.517538617 podStartE2EDuration="1m0.913451444s" 
podCreationTimestamp="2025-11-25 09:46:49 +0000 UTC" firstStartedPulling="2025-11-25 09:46:51.890232092 +0000 UTC m=+160.475204405" lastFinishedPulling="2025-11-25 09:47:49.286144919 +0000 UTC m=+217.871117232" observedRunningTime="2025-11-25 09:47:49.905198209 +0000 UTC m=+218.490170532" watchObservedRunningTime="2025-11-25 09:47:49.913451444 +0000 UTC m=+218.498423757" Nov 25 09:47:50 crc kubenswrapper[4769]: I1125 09:47:50.049778 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-qmkgl" Nov 25 09:47:50 crc kubenswrapper[4769]: I1125 09:47:50.049867 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-qmkgl" Nov 25 09:47:51 crc kubenswrapper[4769]: I1125 09:47:51.094752 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-qmkgl" podUID="1c9095dc-a1a8-4e5c-a357-dae9aaf966e1" containerName="registry-server" probeResult="failure" output=< Nov 25 09:47:51 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 09:47:51 crc kubenswrapper[4769]: > Nov 25 09:47:51 crc kubenswrapper[4769]: I1125 09:47:51.617430 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-7kqx5" Nov 25 09:47:51 crc kubenswrapper[4769]: I1125 09:47:51.617885 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-7kqx5" Nov 25 09:47:51 crc kubenswrapper[4769]: I1125 09:47:51.673839 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-7kqx5" Nov 25 09:47:51 crc kubenswrapper[4769]: I1125 09:47:51.697821 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-c4dbm" podStartSLOduration=5.371871428 podStartE2EDuration="1m2.697804672s" podCreationTimestamp="2025-11-25 09:46:49 +0000 UTC" firstStartedPulling="2025-11-25 09:46:51.904569159 +0000 UTC m=+160.489541472" lastFinishedPulling="2025-11-25 09:47:49.230502403 +0000 UTC m=+217.815474716" observedRunningTime="2025-11-25 09:47:49.966545176 +0000 UTC m=+218.551517509" watchObservedRunningTime="2025-11-25 09:47:51.697804672 +0000 UTC m=+220.282776985" Nov 25 09:47:52 crc kubenswrapper[4769]: I1125 09:47:52.292311 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:47:52 crc kubenswrapper[4769]: I1125 09:47:52.292418 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:47:52 crc kubenswrapper[4769]: I1125 09:47:52.292512 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" Nov 25 09:47:52 crc kubenswrapper[4769]: I1125 09:47:52.293532 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" 
containerStatusID={"Type":"cri-o","ID":"6a47750ed21c017ef2cf17c7e8ce0676b8bd6a9fe2669c3d5adae247e557ea71"} pod="openshift-machine-config-operator/machine-config-daemon-98mzt" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 09:47:52 crc kubenswrapper[4769]: I1125 09:47:52.293611 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" containerID="cri-o://6a47750ed21c017ef2cf17c7e8ce0676b8bd6a9fe2669c3d5adae247e557ea71" gracePeriod=600 Nov 25 09:47:52 crc kubenswrapper[4769]: I1125 09:47:52.882383 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-gfmk8" Nov 25 09:47:52 crc kubenswrapper[4769]: I1125 09:47:52.882467 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-gfmk8" Nov 25 09:47:52 crc kubenswrapper[4769]: I1125 09:47:52.918099 4769 generic.go:334] "Generic (PLEG): container finished" podID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerID="6a47750ed21c017ef2cf17c7e8ce0676b8bd6a9fe2669c3d5adae247e557ea71" exitCode=0 Nov 25 09:47:52 crc kubenswrapper[4769]: I1125 09:47:52.918165 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" event={"ID":"d58c71b5-5dc4-45c1-9b58-9740a35d2256","Type":"ContainerDied","Data":"6a47750ed21c017ef2cf17c7e8ce0676b8bd6a9fe2669c3d5adae247e557ea71"} Nov 25 09:47:53 crc kubenswrapper[4769]: I1125 09:47:53.382717 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-ccfz8" Nov 25 09:47:53 crc kubenswrapper[4769]: I1125 09:47:53.383217 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-ccfz8" Nov 25 09:47:53 crc kubenswrapper[4769]: I1125 09:47:53.927677 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" event={"ID":"d58c71b5-5dc4-45c1-9b58-9740a35d2256","Type":"ContainerStarted","Data":"b1cfbd0201a546442e0247c8093220790d373f3d77e8c2c87bca802822113519"} Nov 25 09:47:53 crc kubenswrapper[4769]: I1125 09:47:53.929935 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l662h" event={"ID":"a51756e2-325b-4979-9e50-2a5cf4ca302f","Type":"ContainerStarted","Data":"f5edd2240dd5994291c7d4b954bdd597d4bc5034c53b794fccdf92fda9d10265"} Nov 25 09:47:53 crc kubenswrapper[4769]: I1125 09:47:53.930251 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gfmk8" podUID="51cc9569-8a6c-473c-a6c7-628c4c7e1aed" containerName="registry-server" probeResult="failure" output=< Nov 25 09:47:53 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 09:47:53 crc kubenswrapper[4769]: > Nov 25 09:47:53 crc kubenswrapper[4769]: I1125 09:47:53.954839 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-l662h" podStartSLOduration=3.973693894 podStartE2EDuration="1m2.954815974s" podCreationTimestamp="2025-11-25 09:46:51 +0000 UTC" firstStartedPulling="2025-11-25 09:46:54.170339149 +0000 UTC m=+162.755311462" lastFinishedPulling="2025-11-25 09:47:53.151461229 +0000 UTC m=+221.736433542" 
observedRunningTime="2025-11-25 09:47:53.951884802 +0000 UTC m=+222.536857115" watchObservedRunningTime="2025-11-25 09:47:53.954815974 +0000 UTC m=+222.539788287" Nov 25 09:47:54 crc kubenswrapper[4769]: I1125 09:47:54.428660 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-ccfz8" podUID="71d2d50a-7ac8-42a1-ac73-4709f6e57ada" containerName="registry-server" probeResult="failure" output=< Nov 25 09:47:54 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 09:47:54 crc kubenswrapper[4769]: > Nov 25 09:47:59 crc kubenswrapper[4769]: I1125 09:47:59.610834 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-c4dbm" Nov 25 09:47:59 crc kubenswrapper[4769]: I1125 09:47:59.611906 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-c4dbm" Nov 25 09:47:59 crc kubenswrapper[4769]: I1125 09:47:59.675692 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-c4dbm" Nov 25 09:48:00 crc kubenswrapper[4769]: I1125 09:48:00.008108 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-c4dbm" Nov 25 09:48:00 crc kubenswrapper[4769]: I1125 09:48:00.093075 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-qmkgl" Nov 25 09:48:00 crc kubenswrapper[4769]: I1125 09:48:00.145753 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-qmkgl" Nov 25 09:48:00 crc kubenswrapper[4769]: I1125 09:48:00.912858 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qmkgl"] Nov 25 09:48:01 crc kubenswrapper[4769]: I1125 09:48:01.664053 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-7kqx5" Nov 25 09:48:01 crc kubenswrapper[4769]: I1125 09:48:01.978211 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-qmkgl" podUID="1c9095dc-a1a8-4e5c-a357-dae9aaf966e1" containerName="registry-server" containerID="cri-o://69c5ee84bfdad6b4231999d06e92cc2130a7bf8f6f710c4b486e55b9ecea8778" gracePeriod=2 Nov 25 09:48:02 crc kubenswrapper[4769]: I1125 09:48:02.039290 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-l662h" Nov 25 09:48:02 crc kubenswrapper[4769]: I1125 09:48:02.039381 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-l662h" Nov 25 09:48:02 crc kubenswrapper[4769]: I1125 09:48:02.078015 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-l662h" Nov 25 09:48:02 crc kubenswrapper[4769]: I1125 09:48:02.365835 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qmkgl" Nov 25 09:48:02 crc kubenswrapper[4769]: I1125 09:48:02.506287 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c9095dc-a1a8-4e5c-a357-dae9aaf966e1-utilities\") pod \"1c9095dc-a1a8-4e5c-a357-dae9aaf966e1\" (UID: \"1c9095dc-a1a8-4e5c-a357-dae9aaf966e1\") " Nov 25 09:48:02 crc kubenswrapper[4769]: I1125 09:48:02.506756 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2f7f5\" (UniqueName: \"kubernetes.io/projected/1c9095dc-a1a8-4e5c-a357-dae9aaf966e1-kube-api-access-2f7f5\") pod \"1c9095dc-a1a8-4e5c-a357-dae9aaf966e1\" (UID: \"1c9095dc-a1a8-4e5c-a357-dae9aaf966e1\") " Nov 25 09:48:02 crc kubenswrapper[4769]: I1125 09:48:02.506858 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c9095dc-a1a8-4e5c-a357-dae9aaf966e1-catalog-content\") pod \"1c9095dc-a1a8-4e5c-a357-dae9aaf966e1\" (UID: \"1c9095dc-a1a8-4e5c-a357-dae9aaf966e1\") " Nov 25 09:48:02 crc kubenswrapper[4769]: I1125 09:48:02.507430 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1c9095dc-a1a8-4e5c-a357-dae9aaf966e1-utilities" (OuterVolumeSpecName: "utilities") pod "1c9095dc-a1a8-4e5c-a357-dae9aaf966e1" (UID: "1c9095dc-a1a8-4e5c-a357-dae9aaf966e1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:48:02 crc kubenswrapper[4769]: I1125 09:48:02.519208 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c9095dc-a1a8-4e5c-a357-dae9aaf966e1-kube-api-access-2f7f5" (OuterVolumeSpecName: "kube-api-access-2f7f5") pod "1c9095dc-a1a8-4e5c-a357-dae9aaf966e1" (UID: "1c9095dc-a1a8-4e5c-a357-dae9aaf966e1"). InnerVolumeSpecName "kube-api-access-2f7f5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:48:02 crc kubenswrapper[4769]: I1125 09:48:02.559912 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1c9095dc-a1a8-4e5c-a357-dae9aaf966e1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1c9095dc-a1a8-4e5c-a357-dae9aaf966e1" (UID: "1c9095dc-a1a8-4e5c-a357-dae9aaf966e1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:48:02 crc kubenswrapper[4769]: I1125 09:48:02.608546 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2f7f5\" (UniqueName: \"kubernetes.io/projected/1c9095dc-a1a8-4e5c-a357-dae9aaf966e1-kube-api-access-2f7f5\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:02 crc kubenswrapper[4769]: I1125 09:48:02.608594 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c9095dc-a1a8-4e5c-a357-dae9aaf966e1-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:02 crc kubenswrapper[4769]: I1125 09:48:02.608607 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c9095dc-a1a8-4e5c-a357-dae9aaf966e1-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:02 crc kubenswrapper[4769]: I1125 09:48:02.691792 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-q8hkd"] Nov 25 09:48:02 crc kubenswrapper[4769]: I1125 09:48:02.937496 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-gfmk8" Nov 25 09:48:02 crc kubenswrapper[4769]: I1125 09:48:02.988453 4769 generic.go:334] "Generic (PLEG): container finished" podID="1c9095dc-a1a8-4e5c-a357-dae9aaf966e1" containerID="69c5ee84bfdad6b4231999d06e92cc2130a7bf8f6f710c4b486e55b9ecea8778" exitCode=0 Nov 25 09:48:02 crc kubenswrapper[4769]: I1125 09:48:02.988574 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qmkgl" event={"ID":"1c9095dc-a1a8-4e5c-a357-dae9aaf966e1","Type":"ContainerDied","Data":"69c5ee84bfdad6b4231999d06e92cc2130a7bf8f6f710c4b486e55b9ecea8778"} Nov 25 09:48:02 crc kubenswrapper[4769]: I1125 09:48:02.988610 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qmkgl" Nov 25 09:48:02 crc kubenswrapper[4769]: I1125 09:48:02.988659 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qmkgl" event={"ID":"1c9095dc-a1a8-4e5c-a357-dae9aaf966e1","Type":"ContainerDied","Data":"b36b73ca0680f528d6be854b17587d8659daa59a509baffc66fbee88b8b8dcec"} Nov 25 09:48:02 crc kubenswrapper[4769]: I1125 09:48:02.988695 4769 scope.go:117] "RemoveContainer" containerID="69c5ee84bfdad6b4231999d06e92cc2130a7bf8f6f710c4b486e55b9ecea8778" Nov 25 09:48:02 crc kubenswrapper[4769]: I1125 09:48:02.995641 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-gfmk8" Nov 25 09:48:03 crc kubenswrapper[4769]: I1125 09:48:03.020880 4769 scope.go:117] "RemoveContainer" containerID="d1c9002ea306b70de40a42a24bcdd04b415f976ccfd3ae21589995a7f8189314" Nov 25 09:48:03 crc kubenswrapper[4769]: I1125 09:48:03.031588 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-l662h" Nov 25 09:48:03 crc kubenswrapper[4769]: I1125 09:48:03.054313 4769 scope.go:117] "RemoveContainer" containerID="009db9fdf743e5c00055bd57aa7943218b5e407657e21873df38b85057ce7480" Nov 25 09:48:03 crc kubenswrapper[4769]: I1125 09:48:03.058864 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qmkgl"] Nov 25 09:48:03 crc kubenswrapper[4769]: I1125 09:48:03.064239 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-qmkgl"] Nov 25 09:48:03 crc kubenswrapper[4769]: I1125 09:48:03.083333 4769 scope.go:117] "RemoveContainer" containerID="69c5ee84bfdad6b4231999d06e92cc2130a7bf8f6f710c4b486e55b9ecea8778" Nov 25 09:48:03 crc kubenswrapper[4769]: E1125 09:48:03.084437 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"69c5ee84bfdad6b4231999d06e92cc2130a7bf8f6f710c4b486e55b9ecea8778\": container with ID starting with 69c5ee84bfdad6b4231999d06e92cc2130a7bf8f6f710c4b486e55b9ecea8778 not found: ID does not exist" containerID="69c5ee84bfdad6b4231999d06e92cc2130a7bf8f6f710c4b486e55b9ecea8778" Nov 25 09:48:03 crc kubenswrapper[4769]: I1125 09:48:03.084502 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"69c5ee84bfdad6b4231999d06e92cc2130a7bf8f6f710c4b486e55b9ecea8778"} err="failed to get container status \"69c5ee84bfdad6b4231999d06e92cc2130a7bf8f6f710c4b486e55b9ecea8778\": rpc error: code = NotFound desc = could not find container \"69c5ee84bfdad6b4231999d06e92cc2130a7bf8f6f710c4b486e55b9ecea8778\": container with ID starting with 69c5ee84bfdad6b4231999d06e92cc2130a7bf8f6f710c4b486e55b9ecea8778 not found: ID does not exist" Nov 25 09:48:03 crc kubenswrapper[4769]: I1125 09:48:03.084557 4769 scope.go:117] "RemoveContainer" containerID="d1c9002ea306b70de40a42a24bcdd04b415f976ccfd3ae21589995a7f8189314" Nov 25 09:48:03 crc kubenswrapper[4769]: E1125 09:48:03.086908 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d1c9002ea306b70de40a42a24bcdd04b415f976ccfd3ae21589995a7f8189314\": container with ID starting with d1c9002ea306b70de40a42a24bcdd04b415f976ccfd3ae21589995a7f8189314 not found: ID does not exist" containerID="d1c9002ea306b70de40a42a24bcdd04b415f976ccfd3ae21589995a7f8189314" Nov 25 
09:48:03 crc kubenswrapper[4769]: I1125 09:48:03.086951 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d1c9002ea306b70de40a42a24bcdd04b415f976ccfd3ae21589995a7f8189314"} err="failed to get container status \"d1c9002ea306b70de40a42a24bcdd04b415f976ccfd3ae21589995a7f8189314\": rpc error: code = NotFound desc = could not find container \"d1c9002ea306b70de40a42a24bcdd04b415f976ccfd3ae21589995a7f8189314\": container with ID starting with d1c9002ea306b70de40a42a24bcdd04b415f976ccfd3ae21589995a7f8189314 not found: ID does not exist" Nov 25 09:48:03 crc kubenswrapper[4769]: I1125 09:48:03.086991 4769 scope.go:117] "RemoveContainer" containerID="009db9fdf743e5c00055bd57aa7943218b5e407657e21873df38b85057ce7480" Nov 25 09:48:03 crc kubenswrapper[4769]: E1125 09:48:03.087895 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"009db9fdf743e5c00055bd57aa7943218b5e407657e21873df38b85057ce7480\": container with ID starting with 009db9fdf743e5c00055bd57aa7943218b5e407657e21873df38b85057ce7480 not found: ID does not exist" containerID="009db9fdf743e5c00055bd57aa7943218b5e407657e21873df38b85057ce7480" Nov 25 09:48:03 crc kubenswrapper[4769]: I1125 09:48:03.087932 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"009db9fdf743e5c00055bd57aa7943218b5e407657e21873df38b85057ce7480"} err="failed to get container status \"009db9fdf743e5c00055bd57aa7943218b5e407657e21873df38b85057ce7480\": rpc error: code = NotFound desc = could not find container \"009db9fdf743e5c00055bd57aa7943218b5e407657e21873df38b85057ce7480\": container with ID starting with 009db9fdf743e5c00055bd57aa7943218b5e407657e21873df38b85057ce7480 not found: ID does not exist" Nov 25 09:48:03 crc kubenswrapper[4769]: I1125 09:48:03.432229 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-ccfz8" Nov 25 09:48:03 crc kubenswrapper[4769]: I1125 09:48:03.477140 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-ccfz8" Nov 25 09:48:04 crc kubenswrapper[4769]: I1125 09:48:04.244694 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1c9095dc-a1a8-4e5c-a357-dae9aaf966e1" path="/var/lib/kubelet/pods/1c9095dc-a1a8-4e5c-a357-dae9aaf966e1/volumes" Nov 25 09:48:04 crc kubenswrapper[4769]: I1125 09:48:04.911059 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-l662h"] Nov 25 09:48:05 crc kubenswrapper[4769]: I1125 09:48:05.000872 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-l662h" podUID="a51756e2-325b-4979-9e50-2a5cf4ca302f" containerName="registry-server" containerID="cri-o://f5edd2240dd5994291c7d4b954bdd597d4bc5034c53b794fccdf92fda9d10265" gracePeriod=2 Nov 25 09:48:05 crc kubenswrapper[4769]: I1125 09:48:05.964707 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l662h" Nov 25 09:48:06 crc kubenswrapper[4769]: I1125 09:48:06.008788 4769 generic.go:334] "Generic (PLEG): container finished" podID="a51756e2-325b-4979-9e50-2a5cf4ca302f" containerID="f5edd2240dd5994291c7d4b954bdd597d4bc5034c53b794fccdf92fda9d10265" exitCode=0 Nov 25 09:48:06 crc kubenswrapper[4769]: I1125 09:48:06.008839 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l662h" event={"ID":"a51756e2-325b-4979-9e50-2a5cf4ca302f","Type":"ContainerDied","Data":"f5edd2240dd5994291c7d4b954bdd597d4bc5034c53b794fccdf92fda9d10265"} Nov 25 09:48:06 crc kubenswrapper[4769]: I1125 09:48:06.008884 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l662h" event={"ID":"a51756e2-325b-4979-9e50-2a5cf4ca302f","Type":"ContainerDied","Data":"9b7aedf0afe559df148da62554d283eca7e1dc3e713c3dae96c676cc853aa952"} Nov 25 09:48:06 crc kubenswrapper[4769]: I1125 09:48:06.008905 4769 scope.go:117] "RemoveContainer" containerID="f5edd2240dd5994291c7d4b954bdd597d4bc5034c53b794fccdf92fda9d10265" Nov 25 09:48:06 crc kubenswrapper[4769]: I1125 09:48:06.008925 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l662h" Nov 25 09:48:06 crc kubenswrapper[4769]: I1125 09:48:06.031667 4769 scope.go:117] "RemoveContainer" containerID="e5b2bdff3d629d95f74991c3dd951dd5ab656e9d7c5baf1cb25db2cce09c4e98" Nov 25 09:48:06 crc kubenswrapper[4769]: I1125 09:48:06.046852 4769 scope.go:117] "RemoveContainer" containerID="ef10e7a9ea34ddb131a14d6d99702e27b8f0476d5d0bf6a2e2057c5444dc7ee7" Nov 25 09:48:06 crc kubenswrapper[4769]: I1125 09:48:06.060203 4769 scope.go:117] "RemoveContainer" containerID="f5edd2240dd5994291c7d4b954bdd597d4bc5034c53b794fccdf92fda9d10265" Nov 25 09:48:06 crc kubenswrapper[4769]: I1125 09:48:06.060697 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a51756e2-325b-4979-9e50-2a5cf4ca302f-catalog-content\") pod \"a51756e2-325b-4979-9e50-2a5cf4ca302f\" (UID: \"a51756e2-325b-4979-9e50-2a5cf4ca302f\") " Nov 25 09:48:06 crc kubenswrapper[4769]: I1125 09:48:06.060846 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a51756e2-325b-4979-9e50-2a5cf4ca302f-utilities\") pod \"a51756e2-325b-4979-9e50-2a5cf4ca302f\" (UID: \"a51756e2-325b-4979-9e50-2a5cf4ca302f\") " Nov 25 09:48:06 crc kubenswrapper[4769]: I1125 09:48:06.060933 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kzflc\" (UniqueName: \"kubernetes.io/projected/a51756e2-325b-4979-9e50-2a5cf4ca302f-kube-api-access-kzflc\") pod \"a51756e2-325b-4979-9e50-2a5cf4ca302f\" (UID: \"a51756e2-325b-4979-9e50-2a5cf4ca302f\") " Nov 25 09:48:06 crc kubenswrapper[4769]: E1125 09:48:06.061506 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f5edd2240dd5994291c7d4b954bdd597d4bc5034c53b794fccdf92fda9d10265\": container with ID starting with f5edd2240dd5994291c7d4b954bdd597d4bc5034c53b794fccdf92fda9d10265 not found: ID does not exist" containerID="f5edd2240dd5994291c7d4b954bdd597d4bc5034c53b794fccdf92fda9d10265" Nov 25 09:48:06 crc kubenswrapper[4769]: I1125 09:48:06.061561 4769 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"f5edd2240dd5994291c7d4b954bdd597d4bc5034c53b794fccdf92fda9d10265"} err="failed to get container status \"f5edd2240dd5994291c7d4b954bdd597d4bc5034c53b794fccdf92fda9d10265\": rpc error: code = NotFound desc = could not find container \"f5edd2240dd5994291c7d4b954bdd597d4bc5034c53b794fccdf92fda9d10265\": container with ID starting with f5edd2240dd5994291c7d4b954bdd597d4bc5034c53b794fccdf92fda9d10265 not found: ID does not exist" Nov 25 09:48:06 crc kubenswrapper[4769]: I1125 09:48:06.061597 4769 scope.go:117] "RemoveContainer" containerID="e5b2bdff3d629d95f74991c3dd951dd5ab656e9d7c5baf1cb25db2cce09c4e98" Nov 25 09:48:06 crc kubenswrapper[4769]: E1125 09:48:06.061989 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e5b2bdff3d629d95f74991c3dd951dd5ab656e9d7c5baf1cb25db2cce09c4e98\": container with ID starting with e5b2bdff3d629d95f74991c3dd951dd5ab656e9d7c5baf1cb25db2cce09c4e98 not found: ID does not exist" containerID="e5b2bdff3d629d95f74991c3dd951dd5ab656e9d7c5baf1cb25db2cce09c4e98" Nov 25 09:48:06 crc kubenswrapper[4769]: I1125 09:48:06.062020 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e5b2bdff3d629d95f74991c3dd951dd5ab656e9d7c5baf1cb25db2cce09c4e98"} err="failed to get container status \"e5b2bdff3d629d95f74991c3dd951dd5ab656e9d7c5baf1cb25db2cce09c4e98\": rpc error: code = NotFound desc = could not find container \"e5b2bdff3d629d95f74991c3dd951dd5ab656e9d7c5baf1cb25db2cce09c4e98\": container with ID starting with e5b2bdff3d629d95f74991c3dd951dd5ab656e9d7c5baf1cb25db2cce09c4e98 not found: ID does not exist" Nov 25 09:48:06 crc kubenswrapper[4769]: I1125 09:48:06.062057 4769 scope.go:117] "RemoveContainer" containerID="ef10e7a9ea34ddb131a14d6d99702e27b8f0476d5d0bf6a2e2057c5444dc7ee7" Nov 25 09:48:06 crc kubenswrapper[4769]: I1125 09:48:06.062293 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a51756e2-325b-4979-9e50-2a5cf4ca302f-utilities" (OuterVolumeSpecName: "utilities") pod "a51756e2-325b-4979-9e50-2a5cf4ca302f" (UID: "a51756e2-325b-4979-9e50-2a5cf4ca302f"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:48:06 crc kubenswrapper[4769]: E1125 09:48:06.062521 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ef10e7a9ea34ddb131a14d6d99702e27b8f0476d5d0bf6a2e2057c5444dc7ee7\": container with ID starting with ef10e7a9ea34ddb131a14d6d99702e27b8f0476d5d0bf6a2e2057c5444dc7ee7 not found: ID does not exist" containerID="ef10e7a9ea34ddb131a14d6d99702e27b8f0476d5d0bf6a2e2057c5444dc7ee7" Nov 25 09:48:06 crc kubenswrapper[4769]: I1125 09:48:06.062566 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef10e7a9ea34ddb131a14d6d99702e27b8f0476d5d0bf6a2e2057c5444dc7ee7"} err="failed to get container status \"ef10e7a9ea34ddb131a14d6d99702e27b8f0476d5d0bf6a2e2057c5444dc7ee7\": rpc error: code = NotFound desc = could not find container \"ef10e7a9ea34ddb131a14d6d99702e27b8f0476d5d0bf6a2e2057c5444dc7ee7\": container with ID starting with ef10e7a9ea34ddb131a14d6d99702e27b8f0476d5d0bf6a2e2057c5444dc7ee7 not found: ID does not exist" Nov 25 09:48:06 crc kubenswrapper[4769]: I1125 09:48:06.069239 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a51756e2-325b-4979-9e50-2a5cf4ca302f-kube-api-access-kzflc" (OuterVolumeSpecName: "kube-api-access-kzflc") pod "a51756e2-325b-4979-9e50-2a5cf4ca302f" (UID: "a51756e2-325b-4979-9e50-2a5cf4ca302f"). InnerVolumeSpecName "kube-api-access-kzflc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:48:06 crc kubenswrapper[4769]: I1125 09:48:06.079147 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a51756e2-325b-4979-9e50-2a5cf4ca302f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a51756e2-325b-4979-9e50-2a5cf4ca302f" (UID: "a51756e2-325b-4979-9e50-2a5cf4ca302f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:48:06 crc kubenswrapper[4769]: I1125 09:48:06.162384 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a51756e2-325b-4979-9e50-2a5cf4ca302f-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:06 crc kubenswrapper[4769]: I1125 09:48:06.162432 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kzflc\" (UniqueName: \"kubernetes.io/projected/a51756e2-325b-4979-9e50-2a5cf4ca302f-kube-api-access-kzflc\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:06 crc kubenswrapper[4769]: I1125 09:48:06.162448 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a51756e2-325b-4979-9e50-2a5cf4ca302f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:06 crc kubenswrapper[4769]: I1125 09:48:06.333329 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-l662h"] Nov 25 09:48:06 crc kubenswrapper[4769]: I1125 09:48:06.336516 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-l662h"] Nov 25 09:48:07 crc kubenswrapper[4769]: I1125 09:48:07.317012 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ccfz8"] Nov 25 09:48:07 crc kubenswrapper[4769]: I1125 09:48:07.320043 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-ccfz8" podUID="71d2d50a-7ac8-42a1-ac73-4709f6e57ada" containerName="registry-server" containerID="cri-o://0e729677665236758bc04cc6a2180873ffe1c0ed7771a27c96cfdf63d6156afd" gracePeriod=2 Nov 25 09:48:07 crc kubenswrapper[4769]: I1125 09:48:07.669285 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ccfz8" Nov 25 09:48:07 crc kubenswrapper[4769]: I1125 09:48:07.782251 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2v8d6\" (UniqueName: \"kubernetes.io/projected/71d2d50a-7ac8-42a1-ac73-4709f6e57ada-kube-api-access-2v8d6\") pod \"71d2d50a-7ac8-42a1-ac73-4709f6e57ada\" (UID: \"71d2d50a-7ac8-42a1-ac73-4709f6e57ada\") " Nov 25 09:48:07 crc kubenswrapper[4769]: I1125 09:48:07.782340 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/71d2d50a-7ac8-42a1-ac73-4709f6e57ada-utilities\") pod \"71d2d50a-7ac8-42a1-ac73-4709f6e57ada\" (UID: \"71d2d50a-7ac8-42a1-ac73-4709f6e57ada\") " Nov 25 09:48:07 crc kubenswrapper[4769]: I1125 09:48:07.782488 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/71d2d50a-7ac8-42a1-ac73-4709f6e57ada-catalog-content\") pod \"71d2d50a-7ac8-42a1-ac73-4709f6e57ada\" (UID: \"71d2d50a-7ac8-42a1-ac73-4709f6e57ada\") " Nov 25 09:48:07 crc kubenswrapper[4769]: I1125 09:48:07.783509 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/71d2d50a-7ac8-42a1-ac73-4709f6e57ada-utilities" (OuterVolumeSpecName: "utilities") pod "71d2d50a-7ac8-42a1-ac73-4709f6e57ada" (UID: "71d2d50a-7ac8-42a1-ac73-4709f6e57ada"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:48:07 crc kubenswrapper[4769]: I1125 09:48:07.789411 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/71d2d50a-7ac8-42a1-ac73-4709f6e57ada-kube-api-access-2v8d6" (OuterVolumeSpecName: "kube-api-access-2v8d6") pod "71d2d50a-7ac8-42a1-ac73-4709f6e57ada" (UID: "71d2d50a-7ac8-42a1-ac73-4709f6e57ada"). InnerVolumeSpecName "kube-api-access-2v8d6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:48:07 crc kubenswrapper[4769]: I1125 09:48:07.884114 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2v8d6\" (UniqueName: \"kubernetes.io/projected/71d2d50a-7ac8-42a1-ac73-4709f6e57ada-kube-api-access-2v8d6\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:07 crc kubenswrapper[4769]: I1125 09:48:07.884164 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/71d2d50a-7ac8-42a1-ac73-4709f6e57ada-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:07 crc kubenswrapper[4769]: I1125 09:48:07.902132 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/71d2d50a-7ac8-42a1-ac73-4709f6e57ada-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "71d2d50a-7ac8-42a1-ac73-4709f6e57ada" (UID: "71d2d50a-7ac8-42a1-ac73-4709f6e57ada"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:48:07 crc kubenswrapper[4769]: I1125 09:48:07.985994 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/71d2d50a-7ac8-42a1-ac73-4709f6e57ada-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:08 crc kubenswrapper[4769]: I1125 09:48:08.033135 4769 generic.go:334] "Generic (PLEG): container finished" podID="71d2d50a-7ac8-42a1-ac73-4709f6e57ada" containerID="0e729677665236758bc04cc6a2180873ffe1c0ed7771a27c96cfdf63d6156afd" exitCode=0 Nov 25 09:48:08 crc kubenswrapper[4769]: I1125 09:48:08.033215 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-ccfz8" Nov 25 09:48:08 crc kubenswrapper[4769]: I1125 09:48:08.033248 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ccfz8" event={"ID":"71d2d50a-7ac8-42a1-ac73-4709f6e57ada","Type":"ContainerDied","Data":"0e729677665236758bc04cc6a2180873ffe1c0ed7771a27c96cfdf63d6156afd"} Nov 25 09:48:08 crc kubenswrapper[4769]: I1125 09:48:08.034142 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ccfz8" event={"ID":"71d2d50a-7ac8-42a1-ac73-4709f6e57ada","Type":"ContainerDied","Data":"9fbc7a7123eba42b61b221c9153dd1f41f21aacd3f46db7817a31f7c9bb8b343"} Nov 25 09:48:08 crc kubenswrapper[4769]: I1125 09:48:08.034237 4769 scope.go:117] "RemoveContainer" containerID="0e729677665236758bc04cc6a2180873ffe1c0ed7771a27c96cfdf63d6156afd" Nov 25 09:48:08 crc kubenswrapper[4769]: I1125 09:48:08.054505 4769 scope.go:117] "RemoveContainer" containerID="23fdac6686aa4c8837bfde3e9a6478ccad902891888f04b9f1de1c19cadddca2" Nov 25 09:48:08 crc kubenswrapper[4769]: I1125 09:48:08.070689 4769 scope.go:117] "RemoveContainer" containerID="bac4506288a76578f1e33c2f151ed1e6969582b7679f6f76937085cf47fd61dd" Nov 25 09:48:08 crc kubenswrapper[4769]: I1125 09:48:08.079083 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ccfz8"] Nov 25 09:48:08 crc kubenswrapper[4769]: I1125 09:48:08.094826 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-ccfz8"] Nov 25 09:48:08 crc kubenswrapper[4769]: I1125 09:48:08.105498 4769 scope.go:117] "RemoveContainer" containerID="0e729677665236758bc04cc6a2180873ffe1c0ed7771a27c96cfdf63d6156afd" Nov 25 09:48:08 crc kubenswrapper[4769]: E1125 09:48:08.106024 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0e729677665236758bc04cc6a2180873ffe1c0ed7771a27c96cfdf63d6156afd\": container with ID starting with 0e729677665236758bc04cc6a2180873ffe1c0ed7771a27c96cfdf63d6156afd not found: ID does not exist" containerID="0e729677665236758bc04cc6a2180873ffe1c0ed7771a27c96cfdf63d6156afd" Nov 25 09:48:08 crc kubenswrapper[4769]: I1125 09:48:08.106079 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0e729677665236758bc04cc6a2180873ffe1c0ed7771a27c96cfdf63d6156afd"} err="failed to get container status \"0e729677665236758bc04cc6a2180873ffe1c0ed7771a27c96cfdf63d6156afd\": rpc error: code = NotFound desc = could not find container \"0e729677665236758bc04cc6a2180873ffe1c0ed7771a27c96cfdf63d6156afd\": container with ID starting with 0e729677665236758bc04cc6a2180873ffe1c0ed7771a27c96cfdf63d6156afd not found: ID does not exist" Nov 25 09:48:08 crc kubenswrapper[4769]: I1125 09:48:08.106106 4769 scope.go:117] "RemoveContainer" containerID="23fdac6686aa4c8837bfde3e9a6478ccad902891888f04b9f1de1c19cadddca2" Nov 25 09:48:08 crc kubenswrapper[4769]: E1125 09:48:08.106582 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"23fdac6686aa4c8837bfde3e9a6478ccad902891888f04b9f1de1c19cadddca2\": container with ID starting with 23fdac6686aa4c8837bfde3e9a6478ccad902891888f04b9f1de1c19cadddca2 not found: ID does not exist" containerID="23fdac6686aa4c8837bfde3e9a6478ccad902891888f04b9f1de1c19cadddca2" Nov 25 09:48:08 crc kubenswrapper[4769]: I1125 09:48:08.106611 4769 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"23fdac6686aa4c8837bfde3e9a6478ccad902891888f04b9f1de1c19cadddca2"} err="failed to get container status \"23fdac6686aa4c8837bfde3e9a6478ccad902891888f04b9f1de1c19cadddca2\": rpc error: code = NotFound desc = could not find container \"23fdac6686aa4c8837bfde3e9a6478ccad902891888f04b9f1de1c19cadddca2\": container with ID starting with 23fdac6686aa4c8837bfde3e9a6478ccad902891888f04b9f1de1c19cadddca2 not found: ID does not exist" Nov 25 09:48:08 crc kubenswrapper[4769]: I1125 09:48:08.106630 4769 scope.go:117] "RemoveContainer" containerID="bac4506288a76578f1e33c2f151ed1e6969582b7679f6f76937085cf47fd61dd" Nov 25 09:48:08 crc kubenswrapper[4769]: E1125 09:48:08.106916 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bac4506288a76578f1e33c2f151ed1e6969582b7679f6f76937085cf47fd61dd\": container with ID starting with bac4506288a76578f1e33c2f151ed1e6969582b7679f6f76937085cf47fd61dd not found: ID does not exist" containerID="bac4506288a76578f1e33c2f151ed1e6969582b7679f6f76937085cf47fd61dd" Nov 25 09:48:08 crc kubenswrapper[4769]: I1125 09:48:08.106944 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bac4506288a76578f1e33c2f151ed1e6969582b7679f6f76937085cf47fd61dd"} err="failed to get container status \"bac4506288a76578f1e33c2f151ed1e6969582b7679f6f76937085cf47fd61dd\": rpc error: code = NotFound desc = could not find container \"bac4506288a76578f1e33c2f151ed1e6969582b7679f6f76937085cf47fd61dd\": container with ID starting with bac4506288a76578f1e33c2f151ed1e6969582b7679f6f76937085cf47fd61dd not found: ID does not exist" Nov 25 09:48:08 crc kubenswrapper[4769]: I1125 09:48:08.254116 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="71d2d50a-7ac8-42a1-ac73-4709f6e57ada" path="/var/lib/kubelet/pods/71d2d50a-7ac8-42a1-ac73-4709f6e57ada/volumes" Nov 25 09:48:08 crc kubenswrapper[4769]: I1125 09:48:08.254752 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a51756e2-325b-4979-9e50-2a5cf4ca302f" path="/var/lib/kubelet/pods/a51756e2-325b-4979-9e50-2a5cf4ca302f/volumes" Nov 25 09:48:27 crc kubenswrapper[4769]: I1125 09:48:27.733564 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" podUID="00b22ed8-e9b5-4173-8574-b06254cd0965" containerName="oauth-openshift" containerID="cri-o://0fb4947d310348df51fde3601fe93fb61d2f6369e11caeff3ce7bcf87df2667b" gracePeriod=15 Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.106512 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.159265 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd"] Nov 25 09:48:28 crc kubenswrapper[4769]: E1125 09:48:28.160372 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1f73c3a-3ade-470b-9e77-000116d1b631" containerName="registry-server" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.160721 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1f73c3a-3ade-470b-9e77-000116d1b631" containerName="registry-server" Nov 25 09:48:28 crc kubenswrapper[4769]: E1125 09:48:28.160780 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a51756e2-325b-4979-9e50-2a5cf4ca302f" containerName="extract-content" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.160789 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="a51756e2-325b-4979-9e50-2a5cf4ca302f" containerName="extract-content" Nov 25 09:48:28 crc kubenswrapper[4769]: E1125 09:48:28.160807 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c9095dc-a1a8-4e5c-a357-dae9aaf966e1" containerName="extract-utilities" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.160815 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c9095dc-a1a8-4e5c-a357-dae9aaf966e1" containerName="extract-utilities" Nov 25 09:48:28 crc kubenswrapper[4769]: E1125 09:48:28.160826 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a51756e2-325b-4979-9e50-2a5cf4ca302f" containerName="registry-server" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.160839 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="a51756e2-325b-4979-9e50-2a5cf4ca302f" containerName="registry-server" Nov 25 09:48:28 crc kubenswrapper[4769]: E1125 09:48:28.162245 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a51756e2-325b-4979-9e50-2a5cf4ca302f" containerName="extract-utilities" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.167493 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="a51756e2-325b-4979-9e50-2a5cf4ca302f" containerName="extract-utilities" Nov 25 09:48:28 crc kubenswrapper[4769]: E1125 09:48:28.167537 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c9095dc-a1a8-4e5c-a357-dae9aaf966e1" containerName="extract-content" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.167548 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c9095dc-a1a8-4e5c-a357-dae9aaf966e1" containerName="extract-content" Nov 25 09:48:28 crc kubenswrapper[4769]: E1125 09:48:28.167571 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00b22ed8-e9b5-4173-8574-b06254cd0965" containerName="oauth-openshift" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.167579 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="00b22ed8-e9b5-4173-8574-b06254cd0965" containerName="oauth-openshift" Nov 25 09:48:28 crc kubenswrapper[4769]: E1125 09:48:28.167621 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="71d2d50a-7ac8-42a1-ac73-4709f6e57ada" containerName="extract-utilities" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.167629 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="71d2d50a-7ac8-42a1-ac73-4709f6e57ada" containerName="extract-utilities" Nov 25 09:48:28 crc kubenswrapper[4769]: E1125 09:48:28.167636 4769 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="71d2d50a-7ac8-42a1-ac73-4709f6e57ada" containerName="registry-server" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.167649 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="71d2d50a-7ac8-42a1-ac73-4709f6e57ada" containerName="registry-server" Nov 25 09:48:28 crc kubenswrapper[4769]: E1125 09:48:28.167668 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c9095dc-a1a8-4e5c-a357-dae9aaf966e1" containerName="registry-server" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.167675 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c9095dc-a1a8-4e5c-a357-dae9aaf966e1" containerName="registry-server" Nov 25 09:48:28 crc kubenswrapper[4769]: E1125 09:48:28.167685 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1f73c3a-3ade-470b-9e77-000116d1b631" containerName="extract-content" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.167692 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1f73c3a-3ade-470b-9e77-000116d1b631" containerName="extract-content" Nov 25 09:48:28 crc kubenswrapper[4769]: E1125 09:48:28.167704 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="71d2d50a-7ac8-42a1-ac73-4709f6e57ada" containerName="extract-content" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.167711 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="71d2d50a-7ac8-42a1-ac73-4709f6e57ada" containerName="extract-content" Nov 25 09:48:28 crc kubenswrapper[4769]: E1125 09:48:28.167723 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1f73c3a-3ade-470b-9e77-000116d1b631" containerName="extract-utilities" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.167730 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1f73c3a-3ade-470b-9e77-000116d1b631" containerName="extract-utilities" Nov 25 09:48:28 crc kubenswrapper[4769]: E1125 09:48:28.167744 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64dbbafa-5fc1-4500-9b49-9d344bf6b356" containerName="pruner" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.167751 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="64dbbafa-5fc1-4500-9b49-9d344bf6b356" containerName="pruner" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.168250 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c9095dc-a1a8-4e5c-a357-dae9aaf966e1" containerName="registry-server" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.168275 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="f1f73c3a-3ade-470b-9e77-000116d1b631" containerName="registry-server" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.168296 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="71d2d50a-7ac8-42a1-ac73-4709f6e57ada" containerName="registry-server" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.168307 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="a51756e2-325b-4979-9e50-2a5cf4ca302f" containerName="registry-server" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.169715 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="64dbbafa-5fc1-4500-9b49-9d344bf6b356" containerName="pruner" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.169806 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="00b22ed8-e9b5-4173-8574-b06254cd0965" containerName="oauth-openshift" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.173672 
4769 generic.go:334] "Generic (PLEG): container finished" podID="00b22ed8-e9b5-4173-8574-b06254cd0965" containerID="0fb4947d310348df51fde3601fe93fb61d2f6369e11caeff3ce7bcf87df2667b" exitCode=0 Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.173857 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.176498 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" event={"ID":"00b22ed8-e9b5-4173-8574-b06254cd0965","Type":"ContainerDied","Data":"0fb4947d310348df51fde3601fe93fb61d2f6369e11caeff3ce7bcf87df2667b"} Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.176576 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-q8hkd" event={"ID":"00b22ed8-e9b5-4173-8574-b06254cd0965","Type":"ContainerDied","Data":"6129d3e09c7d70decc52aea7155296f097686359b06532a0df19c51ec9f3e801"} Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.176605 4769 scope.go:117] "RemoveContainer" containerID="0fb4947d310348df51fde3601fe93fb61d2f6369e11caeff3ce7bcf87df2667b" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.176674 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.178180 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd"] Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.208492 4769 scope.go:117] "RemoveContainer" containerID="0fb4947d310348df51fde3601fe93fb61d2f6369e11caeff3ce7bcf87df2667b" Nov 25 09:48:28 crc kubenswrapper[4769]: E1125 09:48:28.212539 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0fb4947d310348df51fde3601fe93fb61d2f6369e11caeff3ce7bcf87df2667b\": container with ID starting with 0fb4947d310348df51fde3601fe93fb61d2f6369e11caeff3ce7bcf87df2667b not found: ID does not exist" containerID="0fb4947d310348df51fde3601fe93fb61d2f6369e11caeff3ce7bcf87df2667b" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.212586 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0fb4947d310348df51fde3601fe93fb61d2f6369e11caeff3ce7bcf87df2667b"} err="failed to get container status \"0fb4947d310348df51fde3601fe93fb61d2f6369e11caeff3ce7bcf87df2667b\": rpc error: code = NotFound desc = could not find container \"0fb4947d310348df51fde3601fe93fb61d2f6369e11caeff3ce7bcf87df2667b\": container with ID starting with 0fb4947d310348df51fde3601fe93fb61d2f6369e11caeff3ce7bcf87df2667b not found: ID does not exist" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.285582 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-user-template-error\") pod \"00b22ed8-e9b5-4173-8574-b06254cd0965\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.285644 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: 
\"kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-system-ocp-branding-template\") pod \"00b22ed8-e9b5-4173-8574-b06254cd0965\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.285687 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-user-idp-0-file-data\") pod \"00b22ed8-e9b5-4173-8574-b06254cd0965\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.285720 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-user-template-login\") pod \"00b22ed8-e9b5-4173-8574-b06254cd0965\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.285748 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-user-template-provider-selection\") pod \"00b22ed8-e9b5-4173-8574-b06254cd0965\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.285796 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/00b22ed8-e9b5-4173-8574-b06254cd0965-audit-dir\") pod \"00b22ed8-e9b5-4173-8574-b06254cd0965\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.285865 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-system-serving-cert\") pod \"00b22ed8-e9b5-4173-8574-b06254cd0965\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.285911 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-system-session\") pod \"00b22ed8-e9b5-4173-8574-b06254cd0965\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.285952 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-system-cliconfig\") pod \"00b22ed8-e9b5-4173-8574-b06254cd0965\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.286003 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-system-service-ca\") pod \"00b22ed8-e9b5-4173-8574-b06254cd0965\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.286040 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/00b22ed8-e9b5-4173-8574-b06254cd0965-audit-policies\") pod 
\"00b22ed8-e9b5-4173-8574-b06254cd0965\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.286033 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/00b22ed8-e9b5-4173-8574-b06254cd0965-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "00b22ed8-e9b5-4173-8574-b06254cd0965" (UID: "00b22ed8-e9b5-4173-8574-b06254cd0965"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.286080 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-system-trusted-ca-bundle\") pod \"00b22ed8-e9b5-4173-8574-b06254cd0965\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.286124 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4djqh\" (UniqueName: \"kubernetes.io/projected/00b22ed8-e9b5-4173-8574-b06254cd0965-kube-api-access-4djqh\") pod \"00b22ed8-e9b5-4173-8574-b06254cd0965\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.286175 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-system-router-certs\") pod \"00b22ed8-e9b5-4173-8574-b06254cd0965\" (UID: \"00b22ed8-e9b5-4173-8574-b06254cd0965\") " Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.286366 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/32d90a24-d05b-438f-a46f-4d3663ccb171-audit-dir\") pod \"oauth-openshift-64f9fb64bf-6x7dd\" (UID: \"32d90a24-d05b-438f-a46f-4d3663ccb171\") " pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.286408 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/32d90a24-d05b-438f-a46f-4d3663ccb171-v4-0-config-system-cliconfig\") pod \"oauth-openshift-64f9fb64bf-6x7dd\" (UID: \"32d90a24-d05b-438f-a46f-4d3663ccb171\") " pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.286457 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5hldh\" (UniqueName: \"kubernetes.io/projected/32d90a24-d05b-438f-a46f-4d3663ccb171-kube-api-access-5hldh\") pod \"oauth-openshift-64f9fb64bf-6x7dd\" (UID: \"32d90a24-d05b-438f-a46f-4d3663ccb171\") " pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.286491 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/32d90a24-d05b-438f-a46f-4d3663ccb171-v4-0-config-system-router-certs\") pod \"oauth-openshift-64f9fb64bf-6x7dd\" (UID: \"32d90a24-d05b-438f-a46f-4d3663ccb171\") " pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.286523 4769 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/32d90a24-d05b-438f-a46f-4d3663ccb171-v4-0-config-system-service-ca\") pod \"oauth-openshift-64f9fb64bf-6x7dd\" (UID: \"32d90a24-d05b-438f-a46f-4d3663ccb171\") " pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.286544 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/32d90a24-d05b-438f-a46f-4d3663ccb171-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-64f9fb64bf-6x7dd\" (UID: \"32d90a24-d05b-438f-a46f-4d3663ccb171\") " pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.286571 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/32d90a24-d05b-438f-a46f-4d3663ccb171-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-64f9fb64bf-6x7dd\" (UID: \"32d90a24-d05b-438f-a46f-4d3663ccb171\") " pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.286606 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/32d90a24-d05b-438f-a46f-4d3663ccb171-v4-0-config-user-template-login\") pod \"oauth-openshift-64f9fb64bf-6x7dd\" (UID: \"32d90a24-d05b-438f-a46f-4d3663ccb171\") " pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.286647 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/32d90a24-d05b-438f-a46f-4d3663ccb171-v4-0-config-system-session\") pod \"oauth-openshift-64f9fb64bf-6x7dd\" (UID: \"32d90a24-d05b-438f-a46f-4d3663ccb171\") " pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.286676 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/32d90a24-d05b-438f-a46f-4d3663ccb171-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-64f9fb64bf-6x7dd\" (UID: \"32d90a24-d05b-438f-a46f-4d3663ccb171\") " pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.286709 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/32d90a24-d05b-438f-a46f-4d3663ccb171-audit-policies\") pod \"oauth-openshift-64f9fb64bf-6x7dd\" (UID: \"32d90a24-d05b-438f-a46f-4d3663ccb171\") " pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.286741 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/32d90a24-d05b-438f-a46f-4d3663ccb171-v4-0-config-user-template-error\") pod \"oauth-openshift-64f9fb64bf-6x7dd\" (UID: \"32d90a24-d05b-438f-a46f-4d3663ccb171\") " 
pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.286774 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/32d90a24-d05b-438f-a46f-4d3663ccb171-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-64f9fb64bf-6x7dd\" (UID: \"32d90a24-d05b-438f-a46f-4d3663ccb171\") " pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.286801 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/32d90a24-d05b-438f-a46f-4d3663ccb171-v4-0-config-system-serving-cert\") pod \"oauth-openshift-64f9fb64bf-6x7dd\" (UID: \"32d90a24-d05b-438f-a46f-4d3663ccb171\") " pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.286879 4769 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/00b22ed8-e9b5-4173-8574-b06254cd0965-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.287318 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "00b22ed8-e9b5-4173-8574-b06254cd0965" (UID: "00b22ed8-e9b5-4173-8574-b06254cd0965"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.287351 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/00b22ed8-e9b5-4173-8574-b06254cd0965-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "00b22ed8-e9b5-4173-8574-b06254cd0965" (UID: "00b22ed8-e9b5-4173-8574-b06254cd0965"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.287359 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "00b22ed8-e9b5-4173-8574-b06254cd0965" (UID: "00b22ed8-e9b5-4173-8574-b06254cd0965"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.287415 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "00b22ed8-e9b5-4173-8574-b06254cd0965" (UID: "00b22ed8-e9b5-4173-8574-b06254cd0965"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.294351 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00b22ed8-e9b5-4173-8574-b06254cd0965-kube-api-access-4djqh" (OuterVolumeSpecName: "kube-api-access-4djqh") pod "00b22ed8-e9b5-4173-8574-b06254cd0965" (UID: "00b22ed8-e9b5-4173-8574-b06254cd0965"). 
InnerVolumeSpecName "kube-api-access-4djqh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.295176 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "00b22ed8-e9b5-4173-8574-b06254cd0965" (UID: "00b22ed8-e9b5-4173-8574-b06254cd0965"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.295435 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "00b22ed8-e9b5-4173-8574-b06254cd0965" (UID: "00b22ed8-e9b5-4173-8574-b06254cd0965"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.296538 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "00b22ed8-e9b5-4173-8574-b06254cd0965" (UID: "00b22ed8-e9b5-4173-8574-b06254cd0965"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.297026 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "00b22ed8-e9b5-4173-8574-b06254cd0965" (UID: "00b22ed8-e9b5-4173-8574-b06254cd0965"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.297824 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "00b22ed8-e9b5-4173-8574-b06254cd0965" (UID: "00b22ed8-e9b5-4173-8574-b06254cd0965"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.298014 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "00b22ed8-e9b5-4173-8574-b06254cd0965" (UID: "00b22ed8-e9b5-4173-8574-b06254cd0965"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.298308 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "00b22ed8-e9b5-4173-8574-b06254cd0965" (UID: "00b22ed8-e9b5-4173-8574-b06254cd0965"). InnerVolumeSpecName "v4-0-config-system-router-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.298743 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "00b22ed8-e9b5-4173-8574-b06254cd0965" (UID: "00b22ed8-e9b5-4173-8574-b06254cd0965"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.390862 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/32d90a24-d05b-438f-a46f-4d3663ccb171-v4-0-config-system-session\") pod \"oauth-openshift-64f9fb64bf-6x7dd\" (UID: \"32d90a24-d05b-438f-a46f-4d3663ccb171\") " pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.391469 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/32d90a24-d05b-438f-a46f-4d3663ccb171-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-64f9fb64bf-6x7dd\" (UID: \"32d90a24-d05b-438f-a46f-4d3663ccb171\") " pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.391564 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/32d90a24-d05b-438f-a46f-4d3663ccb171-audit-policies\") pod \"oauth-openshift-64f9fb64bf-6x7dd\" (UID: \"32d90a24-d05b-438f-a46f-4d3663ccb171\") " pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.391616 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/32d90a24-d05b-438f-a46f-4d3663ccb171-v4-0-config-user-template-error\") pod \"oauth-openshift-64f9fb64bf-6x7dd\" (UID: \"32d90a24-d05b-438f-a46f-4d3663ccb171\") " pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.391681 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/32d90a24-d05b-438f-a46f-4d3663ccb171-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-64f9fb64bf-6x7dd\" (UID: \"32d90a24-d05b-438f-a46f-4d3663ccb171\") " pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.391722 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/32d90a24-d05b-438f-a46f-4d3663ccb171-v4-0-config-system-serving-cert\") pod \"oauth-openshift-64f9fb64bf-6x7dd\" (UID: \"32d90a24-d05b-438f-a46f-4d3663ccb171\") " pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.391754 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/32d90a24-d05b-438f-a46f-4d3663ccb171-audit-dir\") pod \"oauth-openshift-64f9fb64bf-6x7dd\" (UID: \"32d90a24-d05b-438f-a46f-4d3663ccb171\") " 
pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.391799 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/32d90a24-d05b-438f-a46f-4d3663ccb171-v4-0-config-system-cliconfig\") pod \"oauth-openshift-64f9fb64bf-6x7dd\" (UID: \"32d90a24-d05b-438f-a46f-4d3663ccb171\") " pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.391883 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5hldh\" (UniqueName: \"kubernetes.io/projected/32d90a24-d05b-438f-a46f-4d3663ccb171-kube-api-access-5hldh\") pod \"oauth-openshift-64f9fb64bf-6x7dd\" (UID: \"32d90a24-d05b-438f-a46f-4d3663ccb171\") " pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.391928 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/32d90a24-d05b-438f-a46f-4d3663ccb171-v4-0-config-system-router-certs\") pod \"oauth-openshift-64f9fb64bf-6x7dd\" (UID: \"32d90a24-d05b-438f-a46f-4d3663ccb171\") " pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.391977 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/32d90a24-d05b-438f-a46f-4d3663ccb171-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-64f9fb64bf-6x7dd\" (UID: \"32d90a24-d05b-438f-a46f-4d3663ccb171\") " pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.392005 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/32d90a24-d05b-438f-a46f-4d3663ccb171-v4-0-config-system-service-ca\") pod \"oauth-openshift-64f9fb64bf-6x7dd\" (UID: \"32d90a24-d05b-438f-a46f-4d3663ccb171\") " pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.392040 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/32d90a24-d05b-438f-a46f-4d3663ccb171-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-64f9fb64bf-6x7dd\" (UID: \"32d90a24-d05b-438f-a46f-4d3663ccb171\") " pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.392087 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/32d90a24-d05b-438f-a46f-4d3663ccb171-v4-0-config-user-template-login\") pod \"oauth-openshift-64f9fb64bf-6x7dd\" (UID: \"32d90a24-d05b-438f-a46f-4d3663ccb171\") " pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.392676 4769 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.392703 4769 
reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.392722 4769 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.392741 4769 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.392757 4769 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.392770 4769 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.392783 4769 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.392802 4769 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/00b22ed8-e9b5-4173-8574-b06254cd0965-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.392840 4769 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.392885 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4djqh\" (UniqueName: \"kubernetes.io/projected/00b22ed8-e9b5-4173-8574-b06254cd0965-kube-api-access-4djqh\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.392897 4769 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.392909 4769 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.392923 4769 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/00b22ed8-e9b5-4173-8574-b06254cd0965-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:28 crc 
kubenswrapper[4769]: I1125 09:48:28.392907 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/32d90a24-d05b-438f-a46f-4d3663ccb171-audit-dir\") pod \"oauth-openshift-64f9fb64bf-6x7dd\" (UID: \"32d90a24-d05b-438f-a46f-4d3663ccb171\") " pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.392776 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/32d90a24-d05b-438f-a46f-4d3663ccb171-v4-0-config-system-cliconfig\") pod \"oauth-openshift-64f9fb64bf-6x7dd\" (UID: \"32d90a24-d05b-438f-a46f-4d3663ccb171\") " pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.393120 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/32d90a24-d05b-438f-a46f-4d3663ccb171-audit-policies\") pod \"oauth-openshift-64f9fb64bf-6x7dd\" (UID: \"32d90a24-d05b-438f-a46f-4d3663ccb171\") " pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.393988 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/32d90a24-d05b-438f-a46f-4d3663ccb171-v4-0-config-system-service-ca\") pod \"oauth-openshift-64f9fb64bf-6x7dd\" (UID: \"32d90a24-d05b-438f-a46f-4d3663ccb171\") " pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.396656 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/32d90a24-d05b-438f-a46f-4d3663ccb171-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-64f9fb64bf-6x7dd\" (UID: \"32d90a24-d05b-438f-a46f-4d3663ccb171\") " pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.397479 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/32d90a24-d05b-438f-a46f-4d3663ccb171-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-64f9fb64bf-6x7dd\" (UID: \"32d90a24-d05b-438f-a46f-4d3663ccb171\") " pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.397608 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/32d90a24-d05b-438f-a46f-4d3663ccb171-v4-0-config-system-serving-cert\") pod \"oauth-openshift-64f9fb64bf-6x7dd\" (UID: \"32d90a24-d05b-438f-a46f-4d3663ccb171\") " pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.397990 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/32d90a24-d05b-438f-a46f-4d3663ccb171-v4-0-config-user-template-login\") pod \"oauth-openshift-64f9fb64bf-6x7dd\" (UID: \"32d90a24-d05b-438f-a46f-4d3663ccb171\") " pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.398486 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/32d90a24-d05b-438f-a46f-4d3663ccb171-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-64f9fb64bf-6x7dd\" (UID: \"32d90a24-d05b-438f-a46f-4d3663ccb171\") " pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.398536 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/32d90a24-d05b-438f-a46f-4d3663ccb171-v4-0-config-system-router-certs\") pod \"oauth-openshift-64f9fb64bf-6x7dd\" (UID: \"32d90a24-d05b-438f-a46f-4d3663ccb171\") " pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.398648 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/32d90a24-d05b-438f-a46f-4d3663ccb171-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-64f9fb64bf-6x7dd\" (UID: \"32d90a24-d05b-438f-a46f-4d3663ccb171\") " pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.399243 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/32d90a24-d05b-438f-a46f-4d3663ccb171-v4-0-config-system-session\") pod \"oauth-openshift-64f9fb64bf-6x7dd\" (UID: \"32d90a24-d05b-438f-a46f-4d3663ccb171\") " pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.400738 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/32d90a24-d05b-438f-a46f-4d3663ccb171-v4-0-config-user-template-error\") pod \"oauth-openshift-64f9fb64bf-6x7dd\" (UID: \"32d90a24-d05b-438f-a46f-4d3663ccb171\") " pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.410944 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5hldh\" (UniqueName: \"kubernetes.io/projected/32d90a24-d05b-438f-a46f-4d3663ccb171-kube-api-access-5hldh\") pod \"oauth-openshift-64f9fb64bf-6x7dd\" (UID: \"32d90a24-d05b-438f-a46f-4d3663ccb171\") " pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.496156 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.507573 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-q8hkd"] Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.512278 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-q8hkd"] Nov 25 09:48:28 crc kubenswrapper[4769]: I1125 09:48:28.718441 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd"] Nov 25 09:48:29 crc kubenswrapper[4769]: I1125 09:48:29.203383 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" event={"ID":"32d90a24-d05b-438f-a46f-4d3663ccb171","Type":"ContainerStarted","Data":"c7b4c1e965786d3121f3879fd6b78575ee175be9dba41cc5c1243b9bb26012f9"} Nov 25 09:48:29 crc kubenswrapper[4769]: I1125 09:48:29.203866 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" event={"ID":"32d90a24-d05b-438f-a46f-4d3663ccb171","Type":"ContainerStarted","Data":"0aa4e2803c25970f016ad823135836e42acb378e62c394da262aa44cb5d61b40"} Nov 25 09:48:29 crc kubenswrapper[4769]: I1125 09:48:29.203903 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:29 crc kubenswrapper[4769]: I1125 09:48:29.229787 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" podStartSLOduration=27.229759157 podStartE2EDuration="27.229759157s" podCreationTimestamp="2025-11-25 09:48:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:48:29.225340673 +0000 UTC m=+257.810313016" watchObservedRunningTime="2025-11-25 09:48:29.229759157 +0000 UTC m=+257.814731470" Nov 25 09:48:29 crc kubenswrapper[4769]: I1125 09:48:29.308219 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" Nov 25 09:48:30 crc kubenswrapper[4769]: I1125 09:48:30.244897 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="00b22ed8-e9b5-4173-8574-b06254cd0965" path="/var/lib/kubelet/pods/00b22ed8-e9b5-4173-8574-b06254cd0965/volumes" Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.126411 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-c4dbm"] Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.127471 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-c4dbm" podUID="b8cf525e-b1f8-447d-bde3-7a8746836e39" containerName="registry-server" containerID="cri-o://36e244230874911c54b925004fc09cc221d6adfe7e092bb37a5380d83090d852" gracePeriod=30 Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.139694 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-7m4kh"] Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.140057 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-7m4kh" podUID="1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6" containerName="registry-server" 
containerID="cri-o://00ffee69e6cd4dd3ef63b23b942dd7908bdb19a5d709fe8f3361759d445d04a6" gracePeriod=30 Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.150221 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-rdchw"] Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.150549 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-rdchw" podUID="d9929f0c-776d-4583-94ca-bd665b5d9783" containerName="marketplace-operator" containerID="cri-o://c187e12d695d96e42ebe72a492eb7ef7293e8b5cbf52078fdcf6d6e736636d13" gracePeriod=30 Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.160665 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-7kqx5"] Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.161002 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-7kqx5" podUID="ff08cdaf-0d49-4f4d-bc40-977a970ef1a5" containerName="registry-server" containerID="cri-o://e60ca5c15e6f4f364e711a1439220217cfb50c172de86c71e130e03ad1edc5d6" gracePeriod=30 Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.172021 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-wn2f7"] Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.173213 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-wn2f7" Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.183275 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gfmk8"] Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.183637 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-gfmk8" podUID="51cc9569-8a6c-473c-a6c7-628c4c7e1aed" containerName="registry-server" containerID="cri-o://0fc225b6eb3ce8c6318681fc3e39185462b760f526c4c60711cb422454666784" gracePeriod=30 Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.187389 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-wn2f7"] Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.217243 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/2ef0d2ad-687b-4157-8ab5-803122670e19-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-wn2f7\" (UID: \"2ef0d2ad-687b-4157-8ab5-803122670e19\") " pod="openshift-marketplace/marketplace-operator-79b997595-wn2f7" Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.217325 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2ef0d2ad-687b-4157-8ab5-803122670e19-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-wn2f7\" (UID: \"2ef0d2ad-687b-4157-8ab5-803122670e19\") " pod="openshift-marketplace/marketplace-operator-79b997595-wn2f7" Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.217355 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-km4q9\" (UniqueName: \"kubernetes.io/projected/2ef0d2ad-687b-4157-8ab5-803122670e19-kube-api-access-km4q9\") pod 
\"marketplace-operator-79b997595-wn2f7\" (UID: \"2ef0d2ad-687b-4157-8ab5-803122670e19\") " pod="openshift-marketplace/marketplace-operator-79b997595-wn2f7" Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.318310 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/2ef0d2ad-687b-4157-8ab5-803122670e19-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-wn2f7\" (UID: \"2ef0d2ad-687b-4157-8ab5-803122670e19\") " pod="openshift-marketplace/marketplace-operator-79b997595-wn2f7" Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.318885 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2ef0d2ad-687b-4157-8ab5-803122670e19-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-wn2f7\" (UID: \"2ef0d2ad-687b-4157-8ab5-803122670e19\") " pod="openshift-marketplace/marketplace-operator-79b997595-wn2f7" Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.318911 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-km4q9\" (UniqueName: \"kubernetes.io/projected/2ef0d2ad-687b-4157-8ab5-803122670e19-kube-api-access-km4q9\") pod \"marketplace-operator-79b997595-wn2f7\" (UID: \"2ef0d2ad-687b-4157-8ab5-803122670e19\") " pod="openshift-marketplace/marketplace-operator-79b997595-wn2f7" Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.320737 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2ef0d2ad-687b-4157-8ab5-803122670e19-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-wn2f7\" (UID: \"2ef0d2ad-687b-4157-8ab5-803122670e19\") " pod="openshift-marketplace/marketplace-operator-79b997595-wn2f7" Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.326697 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/2ef0d2ad-687b-4157-8ab5-803122670e19-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-wn2f7\" (UID: \"2ef0d2ad-687b-4157-8ab5-803122670e19\") " pod="openshift-marketplace/marketplace-operator-79b997595-wn2f7" Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.368882 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-km4q9\" (UniqueName: \"kubernetes.io/projected/2ef0d2ad-687b-4157-8ab5-803122670e19-kube-api-access-km4q9\") pod \"marketplace-operator-79b997595-wn2f7\" (UID: \"2ef0d2ad-687b-4157-8ab5-803122670e19\") " pod="openshift-marketplace/marketplace-operator-79b997595-wn2f7" Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.399792 4769 generic.go:334] "Generic (PLEG): container finished" podID="ff08cdaf-0d49-4f4d-bc40-977a970ef1a5" containerID="e60ca5c15e6f4f364e711a1439220217cfb50c172de86c71e130e03ad1edc5d6" exitCode=0 Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.399932 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7kqx5" event={"ID":"ff08cdaf-0d49-4f4d-bc40-977a970ef1a5","Type":"ContainerDied","Data":"e60ca5c15e6f4f364e711a1439220217cfb50c172de86c71e130e03ad1edc5d6"} Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.415631 4769 generic.go:334] "Generic (PLEG): container finished" podID="1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6" 
containerID="00ffee69e6cd4dd3ef63b23b942dd7908bdb19a5d709fe8f3361759d445d04a6" exitCode=0 Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.415766 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7m4kh" event={"ID":"1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6","Type":"ContainerDied","Data":"00ffee69e6cd4dd3ef63b23b942dd7908bdb19a5d709fe8f3361759d445d04a6"} Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.422119 4769 generic.go:334] "Generic (PLEG): container finished" podID="b8cf525e-b1f8-447d-bde3-7a8746836e39" containerID="36e244230874911c54b925004fc09cc221d6adfe7e092bb37a5380d83090d852" exitCode=0 Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.422267 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c4dbm" event={"ID":"b8cf525e-b1f8-447d-bde3-7a8746836e39","Type":"ContainerDied","Data":"36e244230874911c54b925004fc09cc221d6adfe7e092bb37a5380d83090d852"} Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.424653 4769 generic.go:334] "Generic (PLEG): container finished" podID="51cc9569-8a6c-473c-a6c7-628c4c7e1aed" containerID="0fc225b6eb3ce8c6318681fc3e39185462b760f526c4c60711cb422454666784" exitCode=0 Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.424742 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gfmk8" event={"ID":"51cc9569-8a6c-473c-a6c7-628c4c7e1aed","Type":"ContainerDied","Data":"0fc225b6eb3ce8c6318681fc3e39185462b760f526c4c60711cb422454666784"} Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.427479 4769 generic.go:334] "Generic (PLEG): container finished" podID="d9929f0c-776d-4583-94ca-bd665b5d9783" containerID="c187e12d695d96e42ebe72a492eb7ef7293e8b5cbf52078fdcf6d6e736636d13" exitCode=0 Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.427612 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-rdchw" event={"ID":"d9929f0c-776d-4583-94ca-bd665b5d9783","Type":"ContainerDied","Data":"c187e12d695d96e42ebe72a492eb7ef7293e8b5cbf52078fdcf6d6e736636d13"} Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.491295 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-wn2f7" Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.643296 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c4dbm" Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.707505 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-7m4kh" Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.724821 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b8cf525e-b1f8-447d-bde3-7a8746836e39-utilities\") pod \"b8cf525e-b1f8-447d-bde3-7a8746836e39\" (UID: \"b8cf525e-b1f8-447d-bde3-7a8746836e39\") " Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.724976 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qtmmr\" (UniqueName: \"kubernetes.io/projected/b8cf525e-b1f8-447d-bde3-7a8746836e39-kube-api-access-qtmmr\") pod \"b8cf525e-b1f8-447d-bde3-7a8746836e39\" (UID: \"b8cf525e-b1f8-447d-bde3-7a8746836e39\") " Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.725000 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b8cf525e-b1f8-447d-bde3-7a8746836e39-catalog-content\") pod \"b8cf525e-b1f8-447d-bde3-7a8746836e39\" (UID: \"b8cf525e-b1f8-447d-bde3-7a8746836e39\") " Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.726718 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b8cf525e-b1f8-447d-bde3-7a8746836e39-utilities" (OuterVolumeSpecName: "utilities") pod "b8cf525e-b1f8-447d-bde3-7a8746836e39" (UID: "b8cf525e-b1f8-447d-bde3-7a8746836e39"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.749349 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b8cf525e-b1f8-447d-bde3-7a8746836e39-kube-api-access-qtmmr" (OuterVolumeSpecName: "kube-api-access-qtmmr") pod "b8cf525e-b1f8-447d-bde3-7a8746836e39" (UID: "b8cf525e-b1f8-447d-bde3-7a8746836e39"). InnerVolumeSpecName "kube-api-access-qtmmr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.804510 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b8cf525e-b1f8-447d-bde3-7a8746836e39-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b8cf525e-b1f8-447d-bde3-7a8746836e39" (UID: "b8cf525e-b1f8-447d-bde3-7a8746836e39"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.826083 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6-catalog-content\") pod \"1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6\" (UID: \"1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6\") " Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.826193 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6-utilities\") pod \"1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6\" (UID: \"1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6\") " Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.826223 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zccjr\" (UniqueName: \"kubernetes.io/projected/1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6-kube-api-access-zccjr\") pod \"1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6\" (UID: \"1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6\") " Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.826549 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qtmmr\" (UniqueName: \"kubernetes.io/projected/b8cf525e-b1f8-447d-bde3-7a8746836e39-kube-api-access-qtmmr\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.826561 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b8cf525e-b1f8-447d-bde3-7a8746836e39-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.826571 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b8cf525e-b1f8-447d-bde3-7a8746836e39-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.827260 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6-utilities" (OuterVolumeSpecName: "utilities") pod "1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6" (UID: "1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.838340 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6-kube-api-access-zccjr" (OuterVolumeSpecName: "kube-api-access-zccjr") pod "1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6" (UID: "1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6"). InnerVolumeSpecName "kube-api-access-zccjr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.840817 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-rdchw" Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.856651 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gfmk8" Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.860653 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7kqx5" Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.922902 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-wn2f7"] Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.928922 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff08cdaf-0d49-4f4d-bc40-977a970ef1a5-catalog-content\") pod \"ff08cdaf-0d49-4f4d-bc40-977a970ef1a5\" (UID: \"ff08cdaf-0d49-4f4d-bc40-977a970ef1a5\") " Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.929092 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tbhv6\" (UniqueName: \"kubernetes.io/projected/ff08cdaf-0d49-4f4d-bc40-977a970ef1a5-kube-api-access-tbhv6\") pod \"ff08cdaf-0d49-4f4d-bc40-977a970ef1a5\" (UID: \"ff08cdaf-0d49-4f4d-bc40-977a970ef1a5\") " Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.929250 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fs2xl\" (UniqueName: \"kubernetes.io/projected/51cc9569-8a6c-473c-a6c7-628c4c7e1aed-kube-api-access-fs2xl\") pod \"51cc9569-8a6c-473c-a6c7-628c4c7e1aed\" (UID: \"51cc9569-8a6c-473c-a6c7-628c4c7e1aed\") " Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.930692 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff08cdaf-0d49-4f4d-bc40-977a970ef1a5-utilities\") pod \"ff08cdaf-0d49-4f4d-bc40-977a970ef1a5\" (UID: \"ff08cdaf-0d49-4f4d-bc40-977a970ef1a5\") " Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.930739 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d9929f0c-776d-4583-94ca-bd665b5d9783-marketplace-trusted-ca\") pod \"d9929f0c-776d-4583-94ca-bd665b5d9783\" (UID: \"d9929f0c-776d-4583-94ca-bd665b5d9783\") " Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.930900 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51cc9569-8a6c-473c-a6c7-628c4c7e1aed-catalog-content\") pod \"51cc9569-8a6c-473c-a6c7-628c4c7e1aed\" (UID: \"51cc9569-8a6c-473c-a6c7-628c4c7e1aed\") " Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.931071 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gtwq4\" (UniqueName: \"kubernetes.io/projected/d9929f0c-776d-4583-94ca-bd665b5d9783-kube-api-access-gtwq4\") pod \"d9929f0c-776d-4583-94ca-bd665b5d9783\" (UID: \"d9929f0c-776d-4583-94ca-bd665b5d9783\") " Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.931253 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/51cc9569-8a6c-473c-a6c7-628c4c7e1aed-utilities\") pod \"51cc9569-8a6c-473c-a6c7-628c4c7e1aed\" (UID: \"51cc9569-8a6c-473c-a6c7-628c4c7e1aed\") " Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.931389 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/d9929f0c-776d-4583-94ca-bd665b5d9783-marketplace-operator-metrics\") pod \"d9929f0c-776d-4583-94ca-bd665b5d9783\" (UID: \"d9929f0c-776d-4583-94ca-bd665b5d9783\") " Nov 25 09:48:50 crc 
kubenswrapper[4769]: I1125 09:48:50.931982 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.932029 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zccjr\" (UniqueName: \"kubernetes.io/projected/1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6-kube-api-access-zccjr\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.932914 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ff08cdaf-0d49-4f4d-bc40-977a970ef1a5-utilities" (OuterVolumeSpecName: "utilities") pod "ff08cdaf-0d49-4f4d-bc40-977a970ef1a5" (UID: "ff08cdaf-0d49-4f4d-bc40-977a970ef1a5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.933115 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d9929f0c-776d-4583-94ca-bd665b5d9783-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "d9929f0c-776d-4583-94ca-bd665b5d9783" (UID: "d9929f0c-776d-4583-94ca-bd665b5d9783"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.933259 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/51cc9569-8a6c-473c-a6c7-628c4c7e1aed-utilities" (OuterVolumeSpecName: "utilities") pod "51cc9569-8a6c-473c-a6c7-628c4c7e1aed" (UID: "51cc9569-8a6c-473c-a6c7-628c4c7e1aed"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.933905 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/51cc9569-8a6c-473c-a6c7-628c4c7e1aed-kube-api-access-fs2xl" (OuterVolumeSpecName: "kube-api-access-fs2xl") pod "51cc9569-8a6c-473c-a6c7-628c4c7e1aed" (UID: "51cc9569-8a6c-473c-a6c7-628c4c7e1aed"). InnerVolumeSpecName "kube-api-access-fs2xl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.935410 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff08cdaf-0d49-4f4d-bc40-977a970ef1a5-kube-api-access-tbhv6" (OuterVolumeSpecName: "kube-api-access-tbhv6") pod "ff08cdaf-0d49-4f4d-bc40-977a970ef1a5" (UID: "ff08cdaf-0d49-4f4d-bc40-977a970ef1a5"). InnerVolumeSpecName "kube-api-access-tbhv6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.935785 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d9929f0c-776d-4583-94ca-bd665b5d9783-kube-api-access-gtwq4" (OuterVolumeSpecName: "kube-api-access-gtwq4") pod "d9929f0c-776d-4583-94ca-bd665b5d9783" (UID: "d9929f0c-776d-4583-94ca-bd665b5d9783"). InnerVolumeSpecName "kube-api-access-gtwq4". 
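Each volume teardown above runs in three logged phases keyed by the volume's UniqueName: the reconciler starts the unmount (reconciler_common.go:159), the operation generator reports TearDown succeeded (operation_generator.go:803), and the reconciler finally marks the volume detached (reconciler_common.go:293). A sketch that cross-checks the first and last phases (regexes modeled on the messages above and tolerant of this capture's \" escaping; names ours):

```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

// Pairs the first and last teardown phase by UniqueName and reports
// volumes whose unmount started but never completed, e.g. in a
// truncated capture.
var (
	started  = regexp.MustCompile(`UnmountVolume started for volume .*?UniqueName: \\?"([^"\\]+)`)
	detached = regexp.MustCompile(`Volume detached for volume .*?UniqueName: \\?"([^"\\]+)`)
)

func main() {
	pending := map[string]bool{}
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1<<20), 1<<20)
	for sc.Scan() {
		line := sc.Text()
		if m := started.FindStringSubmatch(line); m != nil {
			pending[m[1]] = true
		}
		if m := detached.FindStringSubmatch(line); m != nil {
			delete(pending, m[1])
		}
	}
	for v := range pending {
		fmt.Println("unmount started but never detached:", v)
	}
}
```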
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.938482 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9929f0c-776d-4583-94ca-bd665b5d9783-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "d9929f0c-776d-4583-94ca-bd665b5d9783" (UID: "d9929f0c-776d-4583-94ca-bd665b5d9783"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.940660 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6" (UID: "1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:48:50 crc kubenswrapper[4769]: I1125 09:48:50.948858 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ff08cdaf-0d49-4f4d-bc40-977a970ef1a5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ff08cdaf-0d49-4f4d-bc40-977a970ef1a5" (UID: "ff08cdaf-0d49-4f4d-bc40-977a970ef1a5"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.034040 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gtwq4\" (UniqueName: \"kubernetes.io/projected/d9929f0c-776d-4583-94ca-bd665b5d9783-kube-api-access-gtwq4\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.034077 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/51cc9569-8a6c-473c-a6c7-628c4c7e1aed-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.034088 4769 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/d9929f0c-776d-4583-94ca-bd665b5d9783-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.034097 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff08cdaf-0d49-4f4d-bc40-977a970ef1a5-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.034107 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.034115 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tbhv6\" (UniqueName: \"kubernetes.io/projected/ff08cdaf-0d49-4f4d-bc40-977a970ef1a5-kube-api-access-tbhv6\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.034125 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fs2xl\" (UniqueName: \"kubernetes.io/projected/51cc9569-8a6c-473c-a6c7-628c4c7e1aed-kube-api-access-fs2xl\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.034135 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/ff08cdaf-0d49-4f4d-bc40-977a970ef1a5-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.034144 4769 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d9929f0c-776d-4583-94ca-bd665b5d9783-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.046690 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/51cc9569-8a6c-473c-a6c7-628c4c7e1aed-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "51cc9569-8a6c-473c-a6c7-628c4c7e1aed" (UID: "51cc9569-8a6c-473c-a6c7-628c4c7e1aed"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.135774 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51cc9569-8a6c-473c-a6c7-628c4c7e1aed-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.437924 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7m4kh" event={"ID":"1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6","Type":"ContainerDied","Data":"257dc786b88284fdb335d6bb8b664924b92e294e08cd8b9dd79c5cc73ef9a47c"} Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.438150 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7m4kh" Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.439288 4769 scope.go:117] "RemoveContainer" containerID="00ffee69e6cd4dd3ef63b23b942dd7908bdb19a5d709fe8f3361759d445d04a6" Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.442403 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c4dbm" event={"ID":"b8cf525e-b1f8-447d-bde3-7a8746836e39","Type":"ContainerDied","Data":"16f618697d04c78e8cc801a034937f7a127461d56feb7f497c52ff84387864a0"} Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.442682 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c4dbm" Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.446284 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gfmk8" event={"ID":"51cc9569-8a6c-473c-a6c7-628c4c7e1aed","Type":"ContainerDied","Data":"f8442b261fa35fc207e0b706f146b2935cf1870321c925767662322f45df0cc0"} Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.446446 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gfmk8" Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.450155 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-rdchw" event={"ID":"d9929f0c-776d-4583-94ca-bd665b5d9783","Type":"ContainerDied","Data":"27a615cefc6c94bcfb4b39b17b337e553b1bb6f8c8fe2c56ea60e7033286c022"} Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.450174 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-rdchw" Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.454294 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-wn2f7" event={"ID":"2ef0d2ad-687b-4157-8ab5-803122670e19","Type":"ContainerStarted","Data":"fa739dd9dc65c0844cf05aaaad8bb1b9d21c38e70250a0e796b42c2d34e32e85"} Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.454352 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-wn2f7" event={"ID":"2ef0d2ad-687b-4157-8ab5-803122670e19","Type":"ContainerStarted","Data":"31f9638514fe33346c0f8130394b9e65a9e53f27c8200d75b50762dc306edb61"} Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.454480 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-wn2f7" Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.460352 4769 scope.go:117] "RemoveContainer" containerID="71226d5395abb437d9f1eda4b28ddd7f32fee4a2016ea0f697b5023e4122d7ec" Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.463218 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7kqx5" event={"ID":"ff08cdaf-0d49-4f4d-bc40-977a970ef1a5","Type":"ContainerDied","Data":"e8b3bbf28682918836fe5a6f97b3e6515b735117231c6bad1fea4b11254e9fdc"} Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.463916 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7kqx5" Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.492335 4769 scope.go:117] "RemoveContainer" containerID="0bc46cb34b08e98904adb0da4cb4635afbce4f71893a2e07343284a86268f8bd" Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.496075 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-7m4kh"] Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.497154 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-wn2f7" Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.504272 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-7m4kh"] Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.516953 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-wn2f7" podStartSLOduration=1.516929271 podStartE2EDuration="1.516929271s" podCreationTimestamp="2025-11-25 09:48:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:48:51.512841884 +0000 UTC m=+280.097814217" watchObservedRunningTime="2025-11-25 09:48:51.516929271 +0000 UTC m=+280.101901584" Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.527541 4769 scope.go:117] "RemoveContainer" containerID="36e244230874911c54b925004fc09cc221d6adfe7e092bb37a5380d83090d852" Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.545934 4769 scope.go:117] "RemoveContainer" containerID="88cbc615d5e7d0867efd0f9206563c40847cafdfdf13d22f9e81d09efc3154f7" Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.562238 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gfmk8"] Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 
09:48:51.566653 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-gfmk8"] Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.575336 4769 scope.go:117] "RemoveContainer" containerID="7b6e771a1d7c47975b55ae24ad93520f1bd8aec25575ac25091a5d283bbfc09b" Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.591406 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-c4dbm"] Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.594682 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-c4dbm"] Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.598505 4769 scope.go:117] "RemoveContainer" containerID="0fc225b6eb3ce8c6318681fc3e39185462b760f526c4c60711cb422454666784" Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.616882 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-7kqx5"] Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.629224 4769 scope.go:117] "RemoveContainer" containerID="2478a6131b4dbc071585e61e36361b315efbfee7214533fb271707184453cde6" Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.631896 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-7kqx5"] Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.636847 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-rdchw"] Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.639911 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-rdchw"] Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.646772 4769 scope.go:117] "RemoveContainer" containerID="04c68de936e4d9c785414d36434ca33cc59e0eb0f94876af94f7f401c44009b5" Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.662029 4769 scope.go:117] "RemoveContainer" containerID="c187e12d695d96e42ebe72a492eb7ef7293e8b5cbf52078fdcf6d6e736636d13" Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.679828 4769 scope.go:117] "RemoveContainer" containerID="e60ca5c15e6f4f364e711a1439220217cfb50c172de86c71e130e03ad1edc5d6" Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.702710 4769 scope.go:117] "RemoveContainer" containerID="df61445c9976b18a2074fae62a4abe562fd117e480b98e8ff8a7325f97ece260" Nov 25 09:48:51 crc kubenswrapper[4769]: I1125 09:48:51.729050 4769 scope.go:117] "RemoveContainer" containerID="93ff5604c514891dc5a4e7491656d61129abb396c8d51aa117ed19503fc5b1e1" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.247044 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6" path="/var/lib/kubelet/pods/1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6/volumes" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.247851 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="51cc9569-8a6c-473c-a6c7-628c4c7e1aed" path="/var/lib/kubelet/pods/51cc9569-8a6c-473c-a6c7-628c4c7e1aed/volumes" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.248633 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b8cf525e-b1f8-447d-bde3-7a8746836e39" path="/var/lib/kubelet/pods/b8cf525e-b1f8-447d-bde3-7a8746836e39/volumes" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.249903 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d9929f0c-776d-4583-94ca-bd665b5d9783" 
path="/var/lib/kubelet/pods/d9929f0c-776d-4583-94ca-bd665b5d9783/volumes" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.250445 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ff08cdaf-0d49-4f4d-bc40-977a970ef1a5" path="/var/lib/kubelet/pods/ff08cdaf-0d49-4f4d-bc40-977a970ef1a5/volumes" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.341881 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-5cw8t"] Nov 25 09:48:52 crc kubenswrapper[4769]: E1125 09:48:52.342157 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6" containerName="extract-content" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.342174 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6" containerName="extract-content" Nov 25 09:48:52 crc kubenswrapper[4769]: E1125 09:48:52.342186 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51cc9569-8a6c-473c-a6c7-628c4c7e1aed" containerName="extract-content" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.342194 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="51cc9569-8a6c-473c-a6c7-628c4c7e1aed" containerName="extract-content" Nov 25 09:48:52 crc kubenswrapper[4769]: E1125 09:48:52.342206 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff08cdaf-0d49-4f4d-bc40-977a970ef1a5" containerName="extract-content" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.342215 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff08cdaf-0d49-4f4d-bc40-977a970ef1a5" containerName="extract-content" Nov 25 09:48:52 crc kubenswrapper[4769]: E1125 09:48:52.342224 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51cc9569-8a6c-473c-a6c7-628c4c7e1aed" containerName="registry-server" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.342231 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="51cc9569-8a6c-473c-a6c7-628c4c7e1aed" containerName="registry-server" Nov 25 09:48:52 crc kubenswrapper[4769]: E1125 09:48:52.342242 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6" containerName="extract-utilities" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.342250 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6" containerName="extract-utilities" Nov 25 09:48:52 crc kubenswrapper[4769]: E1125 09:48:52.342262 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff08cdaf-0d49-4f4d-bc40-977a970ef1a5" containerName="extract-utilities" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.342270 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff08cdaf-0d49-4f4d-bc40-977a970ef1a5" containerName="extract-utilities" Nov 25 09:48:52 crc kubenswrapper[4769]: E1125 09:48:52.342281 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8cf525e-b1f8-447d-bde3-7a8746836e39" containerName="extract-content" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.342289 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8cf525e-b1f8-447d-bde3-7a8746836e39" containerName="extract-content" Nov 25 09:48:52 crc kubenswrapper[4769]: E1125 09:48:52.342301 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d9929f0c-776d-4583-94ca-bd665b5d9783" containerName="marketplace-operator" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.342309 
4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="d9929f0c-776d-4583-94ca-bd665b5d9783" containerName="marketplace-operator" Nov 25 09:48:52 crc kubenswrapper[4769]: E1125 09:48:52.342318 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8cf525e-b1f8-447d-bde3-7a8746836e39" containerName="registry-server" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.342326 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8cf525e-b1f8-447d-bde3-7a8746836e39" containerName="registry-server" Nov 25 09:48:52 crc kubenswrapper[4769]: E1125 09:48:52.342335 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51cc9569-8a6c-473c-a6c7-628c4c7e1aed" containerName="extract-utilities" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.342345 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="51cc9569-8a6c-473c-a6c7-628c4c7e1aed" containerName="extract-utilities" Nov 25 09:48:52 crc kubenswrapper[4769]: E1125 09:48:52.342362 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8cf525e-b1f8-447d-bde3-7a8746836e39" containerName="extract-utilities" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.342565 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8cf525e-b1f8-447d-bde3-7a8746836e39" containerName="extract-utilities" Nov 25 09:48:52 crc kubenswrapper[4769]: E1125 09:48:52.342575 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6" containerName="registry-server" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.342582 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6" containerName="registry-server" Nov 25 09:48:52 crc kubenswrapper[4769]: E1125 09:48:52.342591 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff08cdaf-0d49-4f4d-bc40-977a970ef1a5" containerName="registry-server" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.342598 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff08cdaf-0d49-4f4d-bc40-977a970ef1a5" containerName="registry-server" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.342714 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="d9929f0c-776d-4583-94ca-bd665b5d9783" containerName="marketplace-operator" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.342732 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="51cc9569-8a6c-473c-a6c7-628c4c7e1aed" containerName="registry-server" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.342743 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="1751a9e9-57fa-4cd4-aba4-d6d0c1d134e6" containerName="registry-server" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.342754 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff08cdaf-0d49-4f4d-bc40-977a970ef1a5" containerName="registry-server" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.342764 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="b8cf525e-b1f8-447d-bde3-7a8746836e39" containerName="registry-server" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.343744 4769 util.go:30] "No sandbox for pod can be found. 
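The burst of cpu_manager/state_mem/memory_manager entries above is routine cleanup of resource-manager state for the five pods just deleted: each error-level "RemoveStaleState: removing container" line is paired with an info-level "Deleted CPUSet assignment" for the same podUID/containerName, so the E severity alone is not a failure signal here. A sketch that confirms the pairing (regexes taken from the messages above; names ours):

```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

// Balances each error-level RemoveStaleState line against its
// info-level "Deleted CPUSet assignment" twin, keyed by
// podUID/containerName. A nonzero residue means an unpaired entry.
var (
	removed = regexp.MustCompile(`RemoveStaleState: removing container" podUID="([^"]+)" containerName="([^"]+)"`)
	deleted = regexp.MustCompile(`Deleted CPUSet assignment" podUID="([^"]+)" containerName="([^"]+)"`)
)

func main() {
	balance := map[string]int{}
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1<<20), 1<<20)
	for sc.Scan() {
		line := sc.Text()
		for _, m := range removed.FindAllStringSubmatch(line, -1) {
			balance[m[1]+"/"+m[2]]++
		}
		for _, m := range deleted.FindAllStringSubmatch(line, -1) {
			balance[m[1]+"/"+m[2]]--
		}
	}
	for k, n := range balance {
		if n != 0 {
			fmt.Printf("unpaired cleanup for %s (delta %+d)\n", k, n)
		}
	}
}
```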
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5cw8t" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.346310 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.353906 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5cw8t"] Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.454979 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63da627e-e321-47f1-9743-b13ed41ac4cb-catalog-content\") pod \"redhat-marketplace-5cw8t\" (UID: \"63da627e-e321-47f1-9743-b13ed41ac4cb\") " pod="openshift-marketplace/redhat-marketplace-5cw8t" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.455040 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63da627e-e321-47f1-9743-b13ed41ac4cb-utilities\") pod \"redhat-marketplace-5cw8t\" (UID: \"63da627e-e321-47f1-9743-b13ed41ac4cb\") " pod="openshift-marketplace/redhat-marketplace-5cw8t" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.455108 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sjss5\" (UniqueName: \"kubernetes.io/projected/63da627e-e321-47f1-9743-b13ed41ac4cb-kube-api-access-sjss5\") pod \"redhat-marketplace-5cw8t\" (UID: \"63da627e-e321-47f1-9743-b13ed41ac4cb\") " pod="openshift-marketplace/redhat-marketplace-5cw8t" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.548806 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-j7vxb"] Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.550416 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-j7vxb" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.554017 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.557045 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63da627e-e321-47f1-9743-b13ed41ac4cb-catalog-content\") pod \"redhat-marketplace-5cw8t\" (UID: \"63da627e-e321-47f1-9743-b13ed41ac4cb\") " pod="openshift-marketplace/redhat-marketplace-5cw8t" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.557086 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63da627e-e321-47f1-9743-b13ed41ac4cb-utilities\") pod \"redhat-marketplace-5cw8t\" (UID: \"63da627e-e321-47f1-9743-b13ed41ac4cb\") " pod="openshift-marketplace/redhat-marketplace-5cw8t" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.557107 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-j7vxb"] Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.557200 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sjss5\" (UniqueName: \"kubernetes.io/projected/63da627e-e321-47f1-9743-b13ed41ac4cb-kube-api-access-sjss5\") pod \"redhat-marketplace-5cw8t\" (UID: \"63da627e-e321-47f1-9743-b13ed41ac4cb\") " pod="openshift-marketplace/redhat-marketplace-5cw8t" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.557704 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63da627e-e321-47f1-9743-b13ed41ac4cb-utilities\") pod \"redhat-marketplace-5cw8t\" (UID: \"63da627e-e321-47f1-9743-b13ed41ac4cb\") " pod="openshift-marketplace/redhat-marketplace-5cw8t" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.558026 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63da627e-e321-47f1-9743-b13ed41ac4cb-catalog-content\") pod \"redhat-marketplace-5cw8t\" (UID: \"63da627e-e321-47f1-9743-b13ed41ac4cb\") " pod="openshift-marketplace/redhat-marketplace-5cw8t" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.583875 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sjss5\" (UniqueName: \"kubernetes.io/projected/63da627e-e321-47f1-9743-b13ed41ac4cb-kube-api-access-sjss5\") pod \"redhat-marketplace-5cw8t\" (UID: \"63da627e-e321-47f1-9743-b13ed41ac4cb\") " pod="openshift-marketplace/redhat-marketplace-5cw8t" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.658559 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b2b21af-4ed5-4c0a-bf71-5657ec5cf6e8-utilities\") pod \"community-operators-j7vxb\" (UID: \"6b2b21af-4ed5-4c0a-bf71-5657ec5cf6e8\") " pod="openshift-marketplace/community-operators-j7vxb" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.658670 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4z57l\" (UniqueName: \"kubernetes.io/projected/6b2b21af-4ed5-4c0a-bf71-5657ec5cf6e8-kube-api-access-4z57l\") pod \"community-operators-j7vxb\" (UID: 
\"6b2b21af-4ed5-4c0a-bf71-5657ec5cf6e8\") " pod="openshift-marketplace/community-operators-j7vxb" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.658698 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b2b21af-4ed5-4c0a-bf71-5657ec5cf6e8-catalog-content\") pod \"community-operators-j7vxb\" (UID: \"6b2b21af-4ed5-4c0a-bf71-5657ec5cf6e8\") " pod="openshift-marketplace/community-operators-j7vxb" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.675314 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5cw8t" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.759855 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4z57l\" (UniqueName: \"kubernetes.io/projected/6b2b21af-4ed5-4c0a-bf71-5657ec5cf6e8-kube-api-access-4z57l\") pod \"community-operators-j7vxb\" (UID: \"6b2b21af-4ed5-4c0a-bf71-5657ec5cf6e8\") " pod="openshift-marketplace/community-operators-j7vxb" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.759911 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b2b21af-4ed5-4c0a-bf71-5657ec5cf6e8-catalog-content\") pod \"community-operators-j7vxb\" (UID: \"6b2b21af-4ed5-4c0a-bf71-5657ec5cf6e8\") " pod="openshift-marketplace/community-operators-j7vxb" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.759945 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b2b21af-4ed5-4c0a-bf71-5657ec5cf6e8-utilities\") pod \"community-operators-j7vxb\" (UID: \"6b2b21af-4ed5-4c0a-bf71-5657ec5cf6e8\") " pod="openshift-marketplace/community-operators-j7vxb" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.760484 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b2b21af-4ed5-4c0a-bf71-5657ec5cf6e8-utilities\") pod \"community-operators-j7vxb\" (UID: \"6b2b21af-4ed5-4c0a-bf71-5657ec5cf6e8\") " pod="openshift-marketplace/community-operators-j7vxb" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.761238 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b2b21af-4ed5-4c0a-bf71-5657ec5cf6e8-catalog-content\") pod \"community-operators-j7vxb\" (UID: \"6b2b21af-4ed5-4c0a-bf71-5657ec5cf6e8\") " pod="openshift-marketplace/community-operators-j7vxb" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.786927 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4z57l\" (UniqueName: \"kubernetes.io/projected/6b2b21af-4ed5-4c0a-bf71-5657ec5cf6e8-kube-api-access-4z57l\") pod \"community-operators-j7vxb\" (UID: \"6b2b21af-4ed5-4c0a-bf71-5657ec5cf6e8\") " pod="openshift-marketplace/community-operators-j7vxb" Nov 25 09:48:52 crc kubenswrapper[4769]: I1125 09:48:52.941443 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-j7vxb" Nov 25 09:48:53 crc kubenswrapper[4769]: I1125 09:48:53.090205 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5cw8t"] Nov 25 09:48:53 crc kubenswrapper[4769]: I1125 09:48:53.143823 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-j7vxb"] Nov 25 09:48:53 crc kubenswrapper[4769]: W1125 09:48:53.171313 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6b2b21af_4ed5_4c0a_bf71_5657ec5cf6e8.slice/crio-b1b1bebd383bd9ce82ce8128f1fbc37a1b3606fbfdbda2e5a135f327e719419d WatchSource:0}: Error finding container b1b1bebd383bd9ce82ce8128f1fbc37a1b3606fbfdbda2e5a135f327e719419d: Status 404 returned error can't find the container with id b1b1bebd383bd9ce82ce8128f1fbc37a1b3606fbfdbda2e5a135f327e719419d Nov 25 09:48:53 crc kubenswrapper[4769]: I1125 09:48:53.486728 4769 generic.go:334] "Generic (PLEG): container finished" podID="63da627e-e321-47f1-9743-b13ed41ac4cb" containerID="4c3ff9c547be1bbadfcd88856fca70ecf66a44daf4ad29aab697eb4f77cea82e" exitCode=0 Nov 25 09:48:53 crc kubenswrapper[4769]: I1125 09:48:53.487181 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5cw8t" event={"ID":"63da627e-e321-47f1-9743-b13ed41ac4cb","Type":"ContainerDied","Data":"4c3ff9c547be1bbadfcd88856fca70ecf66a44daf4ad29aab697eb4f77cea82e"} Nov 25 09:48:53 crc kubenswrapper[4769]: I1125 09:48:53.487275 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5cw8t" event={"ID":"63da627e-e321-47f1-9743-b13ed41ac4cb","Type":"ContainerStarted","Data":"c742f1c6e135b356f46d8fe2c5d21da2e3ff1d0790c1b8924e748b70d44ec421"} Nov 25 09:48:53 crc kubenswrapper[4769]: I1125 09:48:53.489827 4769 generic.go:334] "Generic (PLEG): container finished" podID="6b2b21af-4ed5-4c0a-bf71-5657ec5cf6e8" containerID="d2be2d60d423a7d88e2a72eb8074c6b94b00892aefeb16efa386a48bc1225904" exitCode=0 Nov 25 09:48:53 crc kubenswrapper[4769]: I1125 09:48:53.490201 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j7vxb" event={"ID":"6b2b21af-4ed5-4c0a-bf71-5657ec5cf6e8","Type":"ContainerDied","Data":"d2be2d60d423a7d88e2a72eb8074c6b94b00892aefeb16efa386a48bc1225904"} Nov 25 09:48:53 crc kubenswrapper[4769]: I1125 09:48:53.490320 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j7vxb" event={"ID":"6b2b21af-4ed5-4c0a-bf71-5657ec5cf6e8","Type":"ContainerStarted","Data":"b1b1bebd383bd9ce82ce8128f1fbc37a1b3606fbfdbda2e5a135f327e719419d"} Nov 25 09:48:54 crc kubenswrapper[4769]: I1125 09:48:54.498640 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5cw8t" event={"ID":"63da627e-e321-47f1-9743-b13ed41ac4cb","Type":"ContainerStarted","Data":"f64538cdfbc816b221920578e12a2c16db2302de9af2bda21285cac9b349f3a8"} Nov 25 09:48:54 crc kubenswrapper[4769]: I1125 09:48:54.501500 4769 generic.go:334] "Generic (PLEG): container finished" podID="6b2b21af-4ed5-4c0a-bf71-5657ec5cf6e8" containerID="76050a4ab985e81e319050e81aea31d0de281d451405ec95009dc34b368557c1" exitCode=0 Nov 25 09:48:54 crc kubenswrapper[4769]: I1125 09:48:54.501549 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j7vxb" 
event={"ID":"6b2b21af-4ed5-4c0a-bf71-5657ec5cf6e8","Type":"ContainerDied","Data":"76050a4ab985e81e319050e81aea31d0de281d451405ec95009dc34b368557c1"} Nov 25 09:48:54 crc kubenswrapper[4769]: I1125 09:48:54.739908 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-lfhp2"] Nov 25 09:48:54 crc kubenswrapper[4769]: I1125 09:48:54.741635 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lfhp2" Nov 25 09:48:54 crc kubenswrapper[4769]: I1125 09:48:54.744025 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 25 09:48:54 crc kubenswrapper[4769]: I1125 09:48:54.749353 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lfhp2"] Nov 25 09:48:54 crc kubenswrapper[4769]: I1125 09:48:54.790827 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9-utilities\") pod \"redhat-operators-lfhp2\" (UID: \"cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9\") " pod="openshift-marketplace/redhat-operators-lfhp2" Nov 25 09:48:54 crc kubenswrapper[4769]: I1125 09:48:54.790894 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9-catalog-content\") pod \"redhat-operators-lfhp2\" (UID: \"cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9\") " pod="openshift-marketplace/redhat-operators-lfhp2" Nov 25 09:48:54 crc kubenswrapper[4769]: I1125 09:48:54.790933 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d4vrq\" (UniqueName: \"kubernetes.io/projected/cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9-kube-api-access-d4vrq\") pod \"redhat-operators-lfhp2\" (UID: \"cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9\") " pod="openshift-marketplace/redhat-operators-lfhp2" Nov 25 09:48:54 crc kubenswrapper[4769]: I1125 09:48:54.892222 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d4vrq\" (UniqueName: \"kubernetes.io/projected/cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9-kube-api-access-d4vrq\") pod \"redhat-operators-lfhp2\" (UID: \"cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9\") " pod="openshift-marketplace/redhat-operators-lfhp2" Nov 25 09:48:54 crc kubenswrapper[4769]: I1125 09:48:54.892410 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9-utilities\") pod \"redhat-operators-lfhp2\" (UID: \"cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9\") " pod="openshift-marketplace/redhat-operators-lfhp2" Nov 25 09:48:54 crc kubenswrapper[4769]: I1125 09:48:54.892455 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9-catalog-content\") pod \"redhat-operators-lfhp2\" (UID: \"cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9\") " pod="openshift-marketplace/redhat-operators-lfhp2" Nov 25 09:48:54 crc kubenswrapper[4769]: I1125 09:48:54.893095 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9-utilities\") pod \"redhat-operators-lfhp2\" (UID: 
\"cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9\") " pod="openshift-marketplace/redhat-operators-lfhp2" Nov 25 09:48:54 crc kubenswrapper[4769]: I1125 09:48:54.893184 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9-catalog-content\") pod \"redhat-operators-lfhp2\" (UID: \"cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9\") " pod="openshift-marketplace/redhat-operators-lfhp2" Nov 25 09:48:54 crc kubenswrapper[4769]: I1125 09:48:54.912914 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d4vrq\" (UniqueName: \"kubernetes.io/projected/cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9-kube-api-access-d4vrq\") pod \"redhat-operators-lfhp2\" (UID: \"cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9\") " pod="openshift-marketplace/redhat-operators-lfhp2" Nov 25 09:48:54 crc kubenswrapper[4769]: I1125 09:48:54.941144 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-lxhnd"] Nov 25 09:48:54 crc kubenswrapper[4769]: I1125 09:48:54.942435 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lxhnd" Nov 25 09:48:54 crc kubenswrapper[4769]: I1125 09:48:54.946698 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 25 09:48:54 crc kubenswrapper[4769]: I1125 09:48:54.961204 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lxhnd"] Nov 25 09:48:54 crc kubenswrapper[4769]: I1125 09:48:54.994227 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79614043-7894-49fb-a36f-24544c3653e0-utilities\") pod \"certified-operators-lxhnd\" (UID: \"79614043-7894-49fb-a36f-24544c3653e0\") " pod="openshift-marketplace/certified-operators-lxhnd" Nov 25 09:48:54 crc kubenswrapper[4769]: I1125 09:48:54.994312 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79614043-7894-49fb-a36f-24544c3653e0-catalog-content\") pod \"certified-operators-lxhnd\" (UID: \"79614043-7894-49fb-a36f-24544c3653e0\") " pod="openshift-marketplace/certified-operators-lxhnd" Nov 25 09:48:54 crc kubenswrapper[4769]: I1125 09:48:54.994352 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pf6ft\" (UniqueName: \"kubernetes.io/projected/79614043-7894-49fb-a36f-24544c3653e0-kube-api-access-pf6ft\") pod \"certified-operators-lxhnd\" (UID: \"79614043-7894-49fb-a36f-24544c3653e0\") " pod="openshift-marketplace/certified-operators-lxhnd" Nov 25 09:48:55 crc kubenswrapper[4769]: I1125 09:48:55.064813 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-lfhp2" Nov 25 09:48:55 crc kubenswrapper[4769]: I1125 09:48:55.096366 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79614043-7894-49fb-a36f-24544c3653e0-catalog-content\") pod \"certified-operators-lxhnd\" (UID: \"79614043-7894-49fb-a36f-24544c3653e0\") " pod="openshift-marketplace/certified-operators-lxhnd" Nov 25 09:48:55 crc kubenswrapper[4769]: I1125 09:48:55.096429 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pf6ft\" (UniqueName: \"kubernetes.io/projected/79614043-7894-49fb-a36f-24544c3653e0-kube-api-access-pf6ft\") pod \"certified-operators-lxhnd\" (UID: \"79614043-7894-49fb-a36f-24544c3653e0\") " pod="openshift-marketplace/certified-operators-lxhnd" Nov 25 09:48:55 crc kubenswrapper[4769]: I1125 09:48:55.096555 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79614043-7894-49fb-a36f-24544c3653e0-utilities\") pod \"certified-operators-lxhnd\" (UID: \"79614043-7894-49fb-a36f-24544c3653e0\") " pod="openshift-marketplace/certified-operators-lxhnd" Nov 25 09:48:55 crc kubenswrapper[4769]: I1125 09:48:55.097120 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79614043-7894-49fb-a36f-24544c3653e0-utilities\") pod \"certified-operators-lxhnd\" (UID: \"79614043-7894-49fb-a36f-24544c3653e0\") " pod="openshift-marketplace/certified-operators-lxhnd" Nov 25 09:48:55 crc kubenswrapper[4769]: I1125 09:48:55.097203 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79614043-7894-49fb-a36f-24544c3653e0-catalog-content\") pod \"certified-operators-lxhnd\" (UID: \"79614043-7894-49fb-a36f-24544c3653e0\") " pod="openshift-marketplace/certified-operators-lxhnd" Nov 25 09:48:55 crc kubenswrapper[4769]: I1125 09:48:55.120717 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pf6ft\" (UniqueName: \"kubernetes.io/projected/79614043-7894-49fb-a36f-24544c3653e0-kube-api-access-pf6ft\") pod \"certified-operators-lxhnd\" (UID: \"79614043-7894-49fb-a36f-24544c3653e0\") " pod="openshift-marketplace/certified-operators-lxhnd" Nov 25 09:48:55 crc kubenswrapper[4769]: I1125 09:48:55.285600 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-lxhnd" Nov 25 09:48:55 crc kubenswrapper[4769]: I1125 09:48:55.480531 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lxhnd"] Nov 25 09:48:55 crc kubenswrapper[4769]: W1125 09:48:55.487285 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod79614043_7894_49fb_a36f_24544c3653e0.slice/crio-513483fb908cf729e447a2ab280169e0751c86b2755f2d90adfe87ecc8601f5b WatchSource:0}: Error finding container 513483fb908cf729e447a2ab280169e0751c86b2755f2d90adfe87ecc8601f5b: Status 404 returned error can't find the container with id 513483fb908cf729e447a2ab280169e0751c86b2755f2d90adfe87ecc8601f5b Nov 25 09:48:55 crc kubenswrapper[4769]: I1125 09:48:55.508940 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lxhnd" event={"ID":"79614043-7894-49fb-a36f-24544c3653e0","Type":"ContainerStarted","Data":"513483fb908cf729e447a2ab280169e0751c86b2755f2d90adfe87ecc8601f5b"} Nov 25 09:48:55 crc kubenswrapper[4769]: I1125 09:48:55.510692 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lfhp2"] Nov 25 09:48:55 crc kubenswrapper[4769]: I1125 09:48:55.511486 4769 generic.go:334] "Generic (PLEG): container finished" podID="63da627e-e321-47f1-9743-b13ed41ac4cb" containerID="f64538cdfbc816b221920578e12a2c16db2302de9af2bda21285cac9b349f3a8" exitCode=0 Nov 25 09:48:55 crc kubenswrapper[4769]: I1125 09:48:55.511603 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5cw8t" event={"ID":"63da627e-e321-47f1-9743-b13ed41ac4cb","Type":"ContainerDied","Data":"f64538cdfbc816b221920578e12a2c16db2302de9af2bda21285cac9b349f3a8"} Nov 25 09:48:55 crc kubenswrapper[4769]: I1125 09:48:55.517478 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j7vxb" event={"ID":"6b2b21af-4ed5-4c0a-bf71-5657ec5cf6e8","Type":"ContainerStarted","Data":"9d9ed3975030fe4c232d7992a936b6b685a48b8118dd4716e45cfc6b06a0ad98"} Nov 25 09:48:55 crc kubenswrapper[4769]: I1125 09:48:55.566562 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-j7vxb" podStartSLOduration=2.107595679 podStartE2EDuration="3.566514316s" podCreationTimestamp="2025-11-25 09:48:52 +0000 UTC" firstStartedPulling="2025-11-25 09:48:53.494796912 +0000 UTC m=+282.079769225" lastFinishedPulling="2025-11-25 09:48:54.953715559 +0000 UTC m=+283.538687862" observedRunningTime="2025-11-25 09:48:55.561058011 +0000 UTC m=+284.146030354" watchObservedRunningTime="2025-11-25 09:48:55.566514316 +0000 UTC m=+284.151486639" Nov 25 09:48:56 crc kubenswrapper[4769]: I1125 09:48:56.525822 4769 generic.go:334] "Generic (PLEG): container finished" podID="cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9" containerID="64ace453db6ff165529c2ad106e90c1e2294bdcb16367bdb037eadaad2360688" exitCode=0 Nov 25 09:48:56 crc kubenswrapper[4769]: I1125 09:48:56.526017 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lfhp2" event={"ID":"cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9","Type":"ContainerDied","Data":"64ace453db6ff165529c2ad106e90c1e2294bdcb16367bdb037eadaad2360688"} Nov 25 09:48:56 crc kubenswrapper[4769]: I1125 09:48:56.526385 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-lfhp2" event={"ID":"cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9","Type":"ContainerStarted","Data":"b139317a32f9848714df4eb5058e8f938259eb8f935188b3d5449c1c8bb5b0b6"} Nov 25 09:48:56 crc kubenswrapper[4769]: I1125 09:48:56.530030 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5cw8t" event={"ID":"63da627e-e321-47f1-9743-b13ed41ac4cb","Type":"ContainerStarted","Data":"829875752abf4065801fd1dbd6660cf7ea06aa70dab3ca6f1dfd3b64817290b0"} Nov 25 09:48:56 crc kubenswrapper[4769]: I1125 09:48:56.531794 4769 generic.go:334] "Generic (PLEG): container finished" podID="79614043-7894-49fb-a36f-24544c3653e0" containerID="08a93dd4eeef3d51c6f3b966e246771921a0ff219a356cc9cf4280c9bc933994" exitCode=0 Nov 25 09:48:56 crc kubenswrapper[4769]: I1125 09:48:56.531871 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lxhnd" event={"ID":"79614043-7894-49fb-a36f-24544c3653e0","Type":"ContainerDied","Data":"08a93dd4eeef3d51c6f3b966e246771921a0ff219a356cc9cf4280c9bc933994"} Nov 25 09:48:56 crc kubenswrapper[4769]: I1125 09:48:56.568052 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-5cw8t" podStartSLOduration=2.089883364 podStartE2EDuration="4.568036113s" podCreationTimestamp="2025-11-25 09:48:52 +0000 UTC" firstStartedPulling="2025-11-25 09:48:53.489328256 +0000 UTC m=+282.074300569" lastFinishedPulling="2025-11-25 09:48:55.967481005 +0000 UTC m=+284.552453318" observedRunningTime="2025-11-25 09:48:56.567032072 +0000 UTC m=+285.152004385" watchObservedRunningTime="2025-11-25 09:48:56.568036113 +0000 UTC m=+285.153008426" Nov 25 09:48:57 crc kubenswrapper[4769]: I1125 09:48:57.538947 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lfhp2" event={"ID":"cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9","Type":"ContainerStarted","Data":"ed979bd7fe6d53bbaf385aaa5f86e45932ed2fbbb0a03d68b1669dfe2f9bbb46"} Nov 25 09:48:57 crc kubenswrapper[4769]: I1125 09:48:57.542543 4769 generic.go:334] "Generic (PLEG): container finished" podID="79614043-7894-49fb-a36f-24544c3653e0" containerID="d73e2ad3fd3d64fdae2400fcc0e79a44e26ab44819a4d363d45320f02c683d92" exitCode=0 Nov 25 09:48:57 crc kubenswrapper[4769]: I1125 09:48:57.542698 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lxhnd" event={"ID":"79614043-7894-49fb-a36f-24544c3653e0","Type":"ContainerDied","Data":"d73e2ad3fd3d64fdae2400fcc0e79a44e26ab44819a4d363d45320f02c683d92"} Nov 25 09:48:58 crc kubenswrapper[4769]: I1125 09:48:58.551927 4769 generic.go:334] "Generic (PLEG): container finished" podID="cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9" containerID="ed979bd7fe6d53bbaf385aaa5f86e45932ed2fbbb0a03d68b1669dfe2f9bbb46" exitCode=0 Nov 25 09:48:58 crc kubenswrapper[4769]: I1125 09:48:58.552044 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lfhp2" event={"ID":"cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9","Type":"ContainerDied","Data":"ed979bd7fe6d53bbaf385aaa5f86e45932ed2fbbb0a03d68b1669dfe2f9bbb46"} Nov 25 09:48:58 crc kubenswrapper[4769]: I1125 09:48:58.557163 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lxhnd" event={"ID":"79614043-7894-49fb-a36f-24544c3653e0","Type":"ContainerStarted","Data":"833d6f4e2efddb7522b9a67a71044615891ff00707d2303eb0a7b3b9028a22f4"} Nov 25 09:48:58 crc 
kubenswrapper[4769]: I1125 09:48:58.593275 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-lxhnd" podStartSLOduration=3.085122966 podStartE2EDuration="4.593254214s" podCreationTimestamp="2025-11-25 09:48:54 +0000 UTC" firstStartedPulling="2025-11-25 09:48:56.533427521 +0000 UTC m=+285.118399834" lastFinishedPulling="2025-11-25 09:48:58.041558769 +0000 UTC m=+286.626531082" observedRunningTime="2025-11-25 09:48:58.592833215 +0000 UTC m=+287.177805538" watchObservedRunningTime="2025-11-25 09:48:58.593254214 +0000 UTC m=+287.178226527"
Nov 25 09:48:59 crc kubenswrapper[4769]: I1125 09:48:59.567790 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lfhp2" event={"ID":"cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9","Type":"ContainerStarted","Data":"bdb4d39d436d54e0a6e5fd318c207a277a3cd25705a90a36a867724a63110bc4"}
Nov 25 09:48:59 crc kubenswrapper[4769]: I1125 09:48:59.587050 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-lfhp2" podStartSLOduration=3.12169977 podStartE2EDuration="5.587012217s" podCreationTimestamp="2025-11-25 09:48:54 +0000 UTC" firstStartedPulling="2025-11-25 09:48:56.527573367 +0000 UTC m=+285.112545680" lastFinishedPulling="2025-11-25 09:48:58.992885814 +0000 UTC m=+287.577858127" observedRunningTime="2025-11-25 09:48:59.584645587 +0000 UTC m=+288.169617900" watchObservedRunningTime="2025-11-25 09:48:59.587012217 +0000 UTC m=+288.171984540"
Nov 25 09:49:02 crc kubenswrapper[4769]: I1125 09:49:02.676376 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-5cw8t"
Nov 25 09:49:02 crc kubenswrapper[4769]: I1125 09:49:02.676694 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-5cw8t"
Nov 25 09:49:02 crc kubenswrapper[4769]: I1125 09:49:02.729766 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-5cw8t"
Nov 25 09:49:02 crc kubenswrapper[4769]: I1125 09:49:02.941881 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-j7vxb"
Nov 25 09:49:02 crc kubenswrapper[4769]: I1125 09:49:02.941942 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-j7vxb"
Nov 25 09:49:02 crc kubenswrapper[4769]: I1125 09:49:02.990161 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-j7vxb"
Nov 25 09:49:03 crc kubenswrapper[4769]: I1125 09:49:03.652622 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-5cw8t"
Nov 25 09:49:03 crc kubenswrapper[4769]: I1125 09:49:03.653276 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-j7vxb"
Nov 25 09:49:05 crc kubenswrapper[4769]: I1125 09:49:05.065121 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-lfhp2"
Nov 25 09:49:05 crc kubenswrapper[4769]: I1125 09:49:05.065676 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-lfhp2"
Nov 25 09:49:05 crc kubenswrapper[4769]: I1125 09:49:05.113876 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-lfhp2"
Nov 25 09:49:05 crc kubenswrapper[4769]: I1125 09:49:05.286441 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-lxhnd"
Nov 25 09:49:05 crc kubenswrapper[4769]: I1125 09:49:05.286508 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-lxhnd"
Nov 25 09:49:05 crc kubenswrapper[4769]: I1125 09:49:05.333848 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-lxhnd"
Nov 25 09:49:05 crc kubenswrapper[4769]: I1125 09:49:05.662345 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-lxhnd"
Nov 25 09:49:05 crc kubenswrapper[4769]: I1125 09:49:05.666470 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-lfhp2"
Nov 25 09:49:21 crc kubenswrapper[4769]: I1125 09:49:21.792248 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/cluster-monitoring-operator-6d5b84845-6czzk"]
Nov 25 09:49:21 crc kubenswrapper[4769]: I1125 09:49:21.794075 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-6czzk"
Nov 25 09:49:21 crc kubenswrapper[4769]: I1125 09:49:21.798876 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"kube-root-ca.crt"
Nov 25 09:49:21 crc kubenswrapper[4769]: I1125 09:49:21.799219 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"cluster-monitoring-operator-dockercfg-wwt9l"
Nov 25 09:49:21 crc kubenswrapper[4769]: I1125 09:49:21.799253 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"cluster-monitoring-operator-tls"
Nov 25 09:49:21 crc kubenswrapper[4769]: I1125 09:49:21.799497 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"telemetry-config"
Nov 25 09:49:21 crc kubenswrapper[4769]: I1125 09:49:21.806488 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"openshift-service-ca.crt"
Nov 25 09:49:21 crc kubenswrapper[4769]: I1125 09:49:21.814266 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/cluster-monitoring-operator-6d5b84845-6czzk"]
Nov 25 09:49:21 crc kubenswrapper[4769]: I1125 09:49:21.932613 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dwr5p\" (UniqueName: \"kubernetes.io/projected/437ab018-bf92-4c37-9c0f-50ef058be208-kube-api-access-dwr5p\") pod \"cluster-monitoring-operator-6d5b84845-6czzk\" (UID: \"437ab018-bf92-4c37-9c0f-50ef058be208\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-6czzk"
Nov 25 09:49:21 crc kubenswrapper[4769]: I1125 09:49:21.932746 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cluster-monitoring-operator-tls\" (UniqueName: \"kubernetes.io/secret/437ab018-bf92-4c37-9c0f-50ef058be208-cluster-monitoring-operator-tls\") pod \"cluster-monitoring-operator-6d5b84845-6czzk\" (UID: \"437ab018-bf92-4c37-9c0f-50ef058be208\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-6czzk"
Nov 25 09:49:21 crc kubenswrapper[4769]: I1125 09:49:21.932788 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-config\" (UniqueName: \"kubernetes.io/configmap/437ab018-bf92-4c37-9c0f-50ef058be208-telemetry-config\") pod \"cluster-monitoring-operator-6d5b84845-6czzk\" (UID: \"437ab018-bf92-4c37-9c0f-50ef058be208\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-6czzk"
Nov 25 09:49:22 crc kubenswrapper[4769]: I1125 09:49:22.034349 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dwr5p\" (UniqueName: \"kubernetes.io/projected/437ab018-bf92-4c37-9c0f-50ef058be208-kube-api-access-dwr5p\") pod \"cluster-monitoring-operator-6d5b84845-6czzk\" (UID: \"437ab018-bf92-4c37-9c0f-50ef058be208\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-6czzk"
Nov 25 09:49:22 crc kubenswrapper[4769]: I1125 09:49:22.034704 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cluster-monitoring-operator-tls\" (UniqueName: \"kubernetes.io/secret/437ab018-bf92-4c37-9c0f-50ef058be208-cluster-monitoring-operator-tls\") pod \"cluster-monitoring-operator-6d5b84845-6czzk\" (UID: \"437ab018-bf92-4c37-9c0f-50ef058be208\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-6czzk"
Nov 25 09:49:22 crc kubenswrapper[4769]: I1125 09:49:22.034745 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-config\" (UniqueName: \"kubernetes.io/configmap/437ab018-bf92-4c37-9c0f-50ef058be208-telemetry-config\") pod \"cluster-monitoring-operator-6d5b84845-6czzk\" (UID: \"437ab018-bf92-4c37-9c0f-50ef058be208\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-6czzk"
Nov 25 09:49:22 crc kubenswrapper[4769]: I1125 09:49:22.035877 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-config\" (UniqueName: \"kubernetes.io/configmap/437ab018-bf92-4c37-9c0f-50ef058be208-telemetry-config\") pod \"cluster-monitoring-operator-6d5b84845-6czzk\" (UID: \"437ab018-bf92-4c37-9c0f-50ef058be208\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-6czzk"
Nov 25 09:49:22 crc kubenswrapper[4769]: I1125 09:49:22.044150 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cluster-monitoring-operator-tls\" (UniqueName: \"kubernetes.io/secret/437ab018-bf92-4c37-9c0f-50ef058be208-cluster-monitoring-operator-tls\") pod \"cluster-monitoring-operator-6d5b84845-6czzk\" (UID: \"437ab018-bf92-4c37-9c0f-50ef058be208\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-6czzk"
Nov 25 09:49:22 crc kubenswrapper[4769]: I1125 09:49:22.057258 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dwr5p\" (UniqueName: \"kubernetes.io/projected/437ab018-bf92-4c37-9c0f-50ef058be208-kube-api-access-dwr5p\") pod \"cluster-monitoring-operator-6d5b84845-6czzk\" (UID: \"437ab018-bf92-4c37-9c0f-50ef058be208\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-6czzk"
Nov 25 09:49:22 crc kubenswrapper[4769]: I1125 09:49:22.125064 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-6czzk"
Nov 25 09:49:22 crc kubenswrapper[4769]: I1125 09:49:22.350353 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/cluster-monitoring-operator-6d5b84845-6czzk"]
Nov 25 09:49:22 crc kubenswrapper[4769]: I1125 09:49:22.725467 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-6czzk" event={"ID":"437ab018-bf92-4c37-9c0f-50ef058be208","Type":"ContainerStarted","Data":"aba39af991270756fe3d3451dddec5f3e58366c784ad610ca793dc2fda6ba86d"}
Nov 25 09:49:25 crc kubenswrapper[4769]: I1125 09:49:25.521734 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-kr7q7"]
Nov 25 09:49:25 crc kubenswrapper[4769]: I1125 09:49:25.523118 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-kr7q7"
Nov 25 09:49:25 crc kubenswrapper[4769]: I1125 09:49:25.547499 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-kr7q7"]
Nov 25 09:49:25 crc kubenswrapper[4769]: I1125 09:49:25.632335 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-tbrnf"]
Nov 25 09:49:25 crc kubenswrapper[4769]: I1125 09:49:25.633225 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-tbrnf"
Nov 25 09:49:25 crc kubenswrapper[4769]: I1125 09:49:25.636188 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-admission-webhook-dockercfg-7j7cf"
Nov 25 09:49:25 crc kubenswrapper[4769]: I1125 09:49:25.636578 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-admission-webhook-tls"
Nov 25 09:49:25 crc kubenswrapper[4769]: I1125 09:49:25.683386 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-tbrnf"]
Nov 25 09:49:25 crc kubenswrapper[4769]: I1125 09:49:25.690810 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wb7jw\" (UniqueName: \"kubernetes.io/projected/c20a9699-fefc-4f37-b256-ede86ba02610-kube-api-access-wb7jw\") pod \"image-registry-66df7c8f76-kr7q7\" (UID: \"c20a9699-fefc-4f37-b256-ede86ba02610\") " pod="openshift-image-registry/image-registry-66df7c8f76-kr7q7"
Nov 25 09:49:25 crc kubenswrapper[4769]: I1125 09:49:25.690859 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/c20a9699-fefc-4f37-b256-ede86ba02610-installation-pull-secrets\") pod \"image-registry-66df7c8f76-kr7q7\" (UID: \"c20a9699-fefc-4f37-b256-ede86ba02610\") " pod="openshift-image-registry/image-registry-66df7c8f76-kr7q7"
Nov 25 09:49:25 crc kubenswrapper[4769]: I1125 09:49:25.690889 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c20a9699-fefc-4f37-b256-ede86ba02610-bound-sa-token\") pod \"image-registry-66df7c8f76-kr7q7\" (UID: \"c20a9699-fefc-4f37-b256-ede86ba02610\") " pod="openshift-image-registry/image-registry-66df7c8f76-kr7q7"
Nov 25 09:49:25 crc kubenswrapper[4769]: I1125 09:49:25.690922 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/c20a9699-fefc-4f37-b256-ede86ba02610-registry-tls\") pod \"image-registry-66df7c8f76-kr7q7\" (UID: \"c20a9699-fefc-4f37-b256-ede86ba02610\") " pod="openshift-image-registry/image-registry-66df7c8f76-kr7q7"
Nov 25 09:49:25 crc kubenswrapper[4769]: I1125 09:49:25.691143 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c20a9699-fefc-4f37-b256-ede86ba02610-trusted-ca\") pod \"image-registry-66df7c8f76-kr7q7\" (UID: \"c20a9699-fefc-4f37-b256-ede86ba02610\") " pod="openshift-image-registry/image-registry-66df7c8f76-kr7q7"
Nov 25 09:49:25 crc kubenswrapper[4769]: I1125 09:49:25.691208 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/c20a9699-fefc-4f37-b256-ede86ba02610-ca-trust-extracted\") pod \"image-registry-66df7c8f76-kr7q7\" (UID: \"c20a9699-fefc-4f37-b256-ede86ba02610\") " pod="openshift-image-registry/image-registry-66df7c8f76-kr7q7"
Nov 25 09:49:25 crc kubenswrapper[4769]: I1125 09:49:25.691264 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/c20a9699-fefc-4f37-b256-ede86ba02610-registry-certificates\") pod \"image-registry-66df7c8f76-kr7q7\" (UID: \"c20a9699-fefc-4f37-b256-ede86ba02610\") " pod="openshift-image-registry/image-registry-66df7c8f76-kr7q7"
Nov 25 09:49:25 crc kubenswrapper[4769]: I1125 09:49:25.691378 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-kr7q7\" (UID: \"c20a9699-fefc-4f37-b256-ede86ba02610\") " pod="openshift-image-registry/image-registry-66df7c8f76-kr7q7"
Nov 25 09:49:25 crc kubenswrapper[4769]: I1125 09:49:25.727365 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-kr7q7\" (UID: \"c20a9699-fefc-4f37-b256-ede86ba02610\") " pod="openshift-image-registry/image-registry-66df7c8f76-kr7q7"
Nov 25 09:49:25 crc kubenswrapper[4769]: I1125 09:49:25.747052 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-6czzk" event={"ID":"437ab018-bf92-4c37-9c0f-50ef058be208","Type":"ContainerStarted","Data":"954e58b1a3a938e551e4cbf9723333b9bb662ea2b5e1950952996a2f6176eff5"}
Nov 25 09:49:25 crc kubenswrapper[4769]: I1125 09:49:25.761402 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-6czzk" podStartSLOduration=2.245823494 podStartE2EDuration="4.761374804s" podCreationTimestamp="2025-11-25 09:49:21 +0000 UTC" firstStartedPulling="2025-11-25 09:49:22.365111481 +0000 UTC m=+310.950083794" lastFinishedPulling="2025-11-25 09:49:24.880662791 +0000 UTC m=+313.465635104" observedRunningTime="2025-11-25 09:49:25.760160028 +0000 UTC m=+314.345132331" watchObservedRunningTime="2025-11-25 09:49:25.761374804 +0000 UTC m=+314.346347117"
Nov 25 09:49:25 crc kubenswrapper[4769]: I1125 09:49:25.793007 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-certificates\" (UniqueName: \"kubernetes.io/secret/028260ef-c431-4375-914a-53f12a77ac38-tls-certificates\") pod \"prometheus-operator-admission-webhook-f54c54754-tbrnf\" (UID: \"028260ef-c431-4375-914a-53f12a77ac38\") " pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-tbrnf"
Nov 25 09:49:25 crc kubenswrapper[4769]: I1125 09:49:25.793129 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wb7jw\" (UniqueName: \"kubernetes.io/projected/c20a9699-fefc-4f37-b256-ede86ba02610-kube-api-access-wb7jw\") pod \"image-registry-66df7c8f76-kr7q7\" (UID: \"c20a9699-fefc-4f37-b256-ede86ba02610\") " pod="openshift-image-registry/image-registry-66df7c8f76-kr7q7"
Nov 25 09:49:25 crc kubenswrapper[4769]: I1125 09:49:25.793150 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/c20a9699-fefc-4f37-b256-ede86ba02610-installation-pull-secrets\") pod \"image-registry-66df7c8f76-kr7q7\" (UID: \"c20a9699-fefc-4f37-b256-ede86ba02610\") " pod="openshift-image-registry/image-registry-66df7c8f76-kr7q7"
Nov 25 09:49:25 crc kubenswrapper[4769]: I1125 09:49:25.793174 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c20a9699-fefc-4f37-b256-ede86ba02610-bound-sa-token\") pod \"image-registry-66df7c8f76-kr7q7\" (UID: \"c20a9699-fefc-4f37-b256-ede86ba02610\") " pod="openshift-image-registry/image-registry-66df7c8f76-kr7q7"
Nov 25 09:49:25 crc kubenswrapper[4769]: I1125 09:49:25.793200 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/c20a9699-fefc-4f37-b256-ede86ba02610-registry-tls\") pod \"image-registry-66df7c8f76-kr7q7\" (UID: \"c20a9699-fefc-4f37-b256-ede86ba02610\") " pod="openshift-image-registry/image-registry-66df7c8f76-kr7q7"
Nov 25 09:49:25 crc kubenswrapper[4769]: I1125 09:49:25.793227 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c20a9699-fefc-4f37-b256-ede86ba02610-trusted-ca\") pod \"image-registry-66df7c8f76-kr7q7\" (UID: \"c20a9699-fefc-4f37-b256-ede86ba02610\") " pod="openshift-image-registry/image-registry-66df7c8f76-kr7q7"
Nov 25 09:49:25 crc kubenswrapper[4769]: I1125 09:49:25.793248 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/c20a9699-fefc-4f37-b256-ede86ba02610-ca-trust-extracted\") pod \"image-registry-66df7c8f76-kr7q7\" (UID: \"c20a9699-fefc-4f37-b256-ede86ba02610\") " pod="openshift-image-registry/image-registry-66df7c8f76-kr7q7"
Nov 25 09:49:25 crc kubenswrapper[4769]: I1125 09:49:25.793272 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/c20a9699-fefc-4f37-b256-ede86ba02610-registry-certificates\") pod \"image-registry-66df7c8f76-kr7q7\" (UID: \"c20a9699-fefc-4f37-b256-ede86ba02610\") " pod="openshift-image-registry/image-registry-66df7c8f76-kr7q7"
Nov 25 09:49:25 crc kubenswrapper[4769]: I1125 09:49:25.794398 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/c20a9699-fefc-4f37-b256-ede86ba02610-registry-certificates\") pod \"image-registry-66df7c8f76-kr7q7\" (UID: \"c20a9699-fefc-4f37-b256-ede86ba02610\") " pod="openshift-image-registry/image-registry-66df7c8f76-kr7q7"
Nov 25 09:49:25 crc kubenswrapper[4769]: I1125 09:49:25.794704 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/c20a9699-fefc-4f37-b256-ede86ba02610-ca-trust-extracted\") pod \"image-registry-66df7c8f76-kr7q7\" (UID: \"c20a9699-fefc-4f37-b256-ede86ba02610\") " pod="openshift-image-registry/image-registry-66df7c8f76-kr7q7"
Nov 25 09:49:25 crc kubenswrapper[4769]: I1125 09:49:25.796489 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c20a9699-fefc-4f37-b256-ede86ba02610-trusted-ca\") pod \"image-registry-66df7c8f76-kr7q7\" (UID: \"c20a9699-fefc-4f37-b256-ede86ba02610\") " pod="openshift-image-registry/image-registry-66df7c8f76-kr7q7"
Nov 25 09:49:25 crc kubenswrapper[4769]: I1125 09:49:25.800944 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/c20a9699-fefc-4f37-b256-ede86ba02610-registry-tls\") pod \"image-registry-66df7c8f76-kr7q7\" (UID: \"c20a9699-fefc-4f37-b256-ede86ba02610\") " pod="openshift-image-registry/image-registry-66df7c8f76-kr7q7"
Nov 25 09:49:25 crc kubenswrapper[4769]: I1125 09:49:25.807596 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/c20a9699-fefc-4f37-b256-ede86ba02610-installation-pull-secrets\") pod \"image-registry-66df7c8f76-kr7q7\" (UID: \"c20a9699-fefc-4f37-b256-ede86ba02610\") " pod="openshift-image-registry/image-registry-66df7c8f76-kr7q7"
Nov 25 09:49:25 crc kubenswrapper[4769]: I1125 09:49:25.812187 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c20a9699-fefc-4f37-b256-ede86ba02610-bound-sa-token\") pod \"image-registry-66df7c8f76-kr7q7\" (UID: \"c20a9699-fefc-4f37-b256-ede86ba02610\") " pod="openshift-image-registry/image-registry-66df7c8f76-kr7q7"
Nov 25 09:49:25 crc kubenswrapper[4769]: I1125 09:49:25.812635 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wb7jw\" (UniqueName: \"kubernetes.io/projected/c20a9699-fefc-4f37-b256-ede86ba02610-kube-api-access-wb7jw\") pod \"image-registry-66df7c8f76-kr7q7\" (UID: \"c20a9699-fefc-4f37-b256-ede86ba02610\") " pod="openshift-image-registry/image-registry-66df7c8f76-kr7q7"
Nov 25 09:49:25 crc kubenswrapper[4769]: I1125 09:49:25.851335 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-kr7q7"
Nov 25 09:49:25 crc kubenswrapper[4769]: I1125 09:49:25.894747 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-certificates\" (UniqueName: \"kubernetes.io/secret/028260ef-c431-4375-914a-53f12a77ac38-tls-certificates\") pod \"prometheus-operator-admission-webhook-f54c54754-tbrnf\" (UID: \"028260ef-c431-4375-914a-53f12a77ac38\") " pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-tbrnf"
Nov 25 09:49:25 crc kubenswrapper[4769]: I1125 09:49:25.901321 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-certificates\" (UniqueName: \"kubernetes.io/secret/028260ef-c431-4375-914a-53f12a77ac38-tls-certificates\") pod \"prometheus-operator-admission-webhook-f54c54754-tbrnf\" (UID: \"028260ef-c431-4375-914a-53f12a77ac38\") " pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-tbrnf"
Nov 25 09:49:25 crc kubenswrapper[4769]: I1125 09:49:25.948882 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-tbrnf"
Nov 25 09:49:26 crc kubenswrapper[4769]: I1125 09:49:26.063018 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-kr7q7"]
Nov 25 09:49:26 crc kubenswrapper[4769]: W1125 09:49:26.079518 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc20a9699_fefc_4f37_b256_ede86ba02610.slice/crio-5be009883773e1fb7df386b7a0a9e85b3fad6a56be1e0655a173339b34ee73db WatchSource:0}: Error finding container 5be009883773e1fb7df386b7a0a9e85b3fad6a56be1e0655a173339b34ee73db: Status 404 returned error can't find the container with id 5be009883773e1fb7df386b7a0a9e85b3fad6a56be1e0655a173339b34ee73db
Nov 25 09:49:26 crc kubenswrapper[4769]: I1125 09:49:26.192388 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-tbrnf"]
Nov 25 09:49:26 crc kubenswrapper[4769]: W1125 09:49:26.197885 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod028260ef_c431_4375_914a_53f12a77ac38.slice/crio-65d080367f7fc63fd0b2f4588a4a64e7a192adab6378556a3c13076b5db6fd74 WatchSource:0}: Error finding container 65d080367f7fc63fd0b2f4588a4a64e7a192adab6378556a3c13076b5db6fd74: Status 404 returned error can't find the container with id 65d080367f7fc63fd0b2f4588a4a64e7a192adab6378556a3c13076b5db6fd74
Nov 25 09:49:26 crc kubenswrapper[4769]: I1125 09:49:26.753157 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-tbrnf" event={"ID":"028260ef-c431-4375-914a-53f12a77ac38","Type":"ContainerStarted","Data":"65d080367f7fc63fd0b2f4588a4a64e7a192adab6378556a3c13076b5db6fd74"}
Nov 25 09:49:26 crc kubenswrapper[4769]: I1125 09:49:26.755615 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-kr7q7" event={"ID":"c20a9699-fefc-4f37-b256-ede86ba02610","Type":"ContainerStarted","Data":"7ec2243557f4eb37ee4b630d146442316db599b7db238d5b36a356c708c26ad0"}
Nov 25 09:49:26 crc kubenswrapper[4769]: I1125 09:49:26.755924 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-kr7q7" event={"ID":"c20a9699-fefc-4f37-b256-ede86ba02610","Type":"ContainerStarted","Data":"5be009883773e1fb7df386b7a0a9e85b3fad6a56be1e0655a173339b34ee73db"}
Nov 25 09:49:27 crc kubenswrapper[4769]: I1125 09:49:27.761712 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-kr7q7"
Nov 25 09:49:28 crc kubenswrapper[4769]: I1125 09:49:28.770558 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-tbrnf" event={"ID":"028260ef-c431-4375-914a-53f12a77ac38","Type":"ContainerStarted","Data":"9b65ade41ba07da73037b5a59bafeac3d5f25592ebb42fb6d2f69d5e0b48b53b"}
Nov 25 09:49:28 crc kubenswrapper[4769]: I1125 09:49:28.791902 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-tbrnf" podStartSLOduration=1.949843824 podStartE2EDuration="3.791884272s" podCreationTimestamp="2025-11-25 09:49:25 +0000 UTC" firstStartedPulling="2025-11-25 09:49:26.200216943 +0000 UTC m=+314.785189256" lastFinishedPulling="2025-11-25 09:49:28.042257371 +0000 UTC m=+316.627229704" observedRunningTime="2025-11-25 09:49:28.78991024 +0000 UTC m=+317.374882553" watchObservedRunningTime="2025-11-25 09:49:28.791884272 +0000 UTC m=+317.376856585"
Nov 25 09:49:28 crc kubenswrapper[4769]: I1125 09:49:28.792241 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-kr7q7" podStartSLOduration=3.792234129 podStartE2EDuration="3.792234129s" podCreationTimestamp="2025-11-25 09:49:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:49:26.7785097 +0000 UTC m=+315.363482033" watchObservedRunningTime="2025-11-25 09:49:28.792234129 +0000 UTC m=+317.377206442"
Nov 25 09:49:29 crc kubenswrapper[4769]: I1125 09:49:29.776378 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-tbrnf"
Nov 25 09:49:29 crc kubenswrapper[4769]: I1125 09:49:29.783530 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-tbrnf"
Nov 25 09:49:30 crc kubenswrapper[4769]: I1125 09:49:30.694530 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/prometheus-operator-db54df47d-ctkgt"]
Nov 25 09:49:30 crc kubenswrapper[4769]: I1125 09:49:30.696406 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/prometheus-operator-db54df47d-ctkgt"
Nov 25 09:49:30 crc kubenswrapper[4769]: I1125 09:49:30.698745 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-kube-rbac-proxy-config"
Nov 25 09:49:30 crc kubenswrapper[4769]: I1125 09:49:30.698779 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"metrics-client-ca"
Nov 25 09:49:30 crc kubenswrapper[4769]: I1125 09:49:30.699782 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-dockercfg-54tjz"
Nov 25 09:49:30 crc kubenswrapper[4769]: I1125 09:49:30.704247 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-tls"
Nov 25 09:49:30 crc kubenswrapper[4769]: I1125 09:49:30.708452 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-db54df47d-ctkgt"]
Nov 25 09:49:30 crc kubenswrapper[4769]: I1125 09:49:30.776022 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-operator-tls\" (UniqueName: \"kubernetes.io/secret/ca4f26bd-c24b-4250-967c-ff17491842a9-prometheus-operator-tls\") pod \"prometheus-operator-db54df47d-ctkgt\" (UID: \"ca4f26bd-c24b-4250-967c-ff17491842a9\") " pod="openshift-monitoring/prometheus-operator-db54df47d-ctkgt"
Nov 25 09:49:30 crc kubenswrapper[4769]: I1125 09:49:30.776096 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/ca4f26bd-c24b-4250-967c-ff17491842a9-metrics-client-ca\") pod \"prometheus-operator-db54df47d-ctkgt\" (UID: \"ca4f26bd-c24b-4250-967c-ff17491842a9\") " pod="openshift-monitoring/prometheus-operator-db54df47d-ctkgt"
Nov 25 09:49:30 crc kubenswrapper[4769]: I1125 09:49:30.776156 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q28x9\" (UniqueName: \"kubernetes.io/projected/ca4f26bd-c24b-4250-967c-ff17491842a9-kube-api-access-q28x9\") pod \"prometheus-operator-db54df47d-ctkgt\" (UID: \"ca4f26bd-c24b-4250-967c-ff17491842a9\") " pod="openshift-monitoring/prometheus-operator-db54df47d-ctkgt"
Nov 25 09:49:30 crc kubenswrapper[4769]: I1125 09:49:30.776259 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-operator-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/ca4f26bd-c24b-4250-967c-ff17491842a9-prometheus-operator-kube-rbac-proxy-config\") pod \"prometheus-operator-db54df47d-ctkgt\" (UID: \"ca4f26bd-c24b-4250-967c-ff17491842a9\") " pod="openshift-monitoring/prometheus-operator-db54df47d-ctkgt"
Nov 25 09:49:30 crc kubenswrapper[4769]: I1125 09:49:30.878268 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-operator-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/ca4f26bd-c24b-4250-967c-ff17491842a9-prometheus-operator-kube-rbac-proxy-config\") pod \"prometheus-operator-db54df47d-ctkgt\" (UID: \"ca4f26bd-c24b-4250-967c-ff17491842a9\") " pod="openshift-monitoring/prometheus-operator-db54df47d-ctkgt"
Nov 25 09:49:30 crc kubenswrapper[4769]: I1125 09:49:30.878345 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-operator-tls\" (UniqueName: \"kubernetes.io/secret/ca4f26bd-c24b-4250-967c-ff17491842a9-prometheus-operator-tls\") pod \"prometheus-operator-db54df47d-ctkgt\" (UID: \"ca4f26bd-c24b-4250-967c-ff17491842a9\") " pod="openshift-monitoring/prometheus-operator-db54df47d-ctkgt"
Nov 25 09:49:30 crc kubenswrapper[4769]: I1125 09:49:30.878380 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/ca4f26bd-c24b-4250-967c-ff17491842a9-metrics-client-ca\") pod \"prometheus-operator-db54df47d-ctkgt\" (UID: \"ca4f26bd-c24b-4250-967c-ff17491842a9\") " pod="openshift-monitoring/prometheus-operator-db54df47d-ctkgt"
Nov 25 09:49:30 crc kubenswrapper[4769]: I1125 09:49:30.878457 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q28x9\" (UniqueName: \"kubernetes.io/projected/ca4f26bd-c24b-4250-967c-ff17491842a9-kube-api-access-q28x9\") pod \"prometheus-operator-db54df47d-ctkgt\" (UID: \"ca4f26bd-c24b-4250-967c-ff17491842a9\") " pod="openshift-monitoring/prometheus-operator-db54df47d-ctkgt"
Nov 25 09:49:30 crc kubenswrapper[4769]: I1125 09:49:30.879751 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/ca4f26bd-c24b-4250-967c-ff17491842a9-metrics-client-ca\") pod \"prometheus-operator-db54df47d-ctkgt\" (UID: \"ca4f26bd-c24b-4250-967c-ff17491842a9\") " pod="openshift-monitoring/prometheus-operator-db54df47d-ctkgt"
Nov 25 09:49:30 crc kubenswrapper[4769]: I1125 09:49:30.887192 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-operator-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/ca4f26bd-c24b-4250-967c-ff17491842a9-prometheus-operator-kube-rbac-proxy-config\") pod \"prometheus-operator-db54df47d-ctkgt\" (UID: \"ca4f26bd-c24b-4250-967c-ff17491842a9\") " pod="openshift-monitoring/prometheus-operator-db54df47d-ctkgt"
Nov 25 09:49:30 crc kubenswrapper[4769]: I1125 09:49:30.889311 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-operator-tls\" (UniqueName: \"kubernetes.io/secret/ca4f26bd-c24b-4250-967c-ff17491842a9-prometheus-operator-tls\") pod \"prometheus-operator-db54df47d-ctkgt\" (UID: \"ca4f26bd-c24b-4250-967c-ff17491842a9\") " pod="openshift-monitoring/prometheus-operator-db54df47d-ctkgt"
Nov 25 09:49:30 crc kubenswrapper[4769]: I1125 09:49:30.896952 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q28x9\" (UniqueName: \"kubernetes.io/projected/ca4f26bd-c24b-4250-967c-ff17491842a9-kube-api-access-q28x9\") pod \"prometheus-operator-db54df47d-ctkgt\" (UID: \"ca4f26bd-c24b-4250-967c-ff17491842a9\") " pod="openshift-monitoring/prometheus-operator-db54df47d-ctkgt"
Nov 25 09:49:31 crc kubenswrapper[4769]: I1125 09:49:31.018279 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/prometheus-operator-db54df47d-ctkgt"
Nov 25 09:49:31 crc kubenswrapper[4769]: I1125 09:49:31.219799 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-db54df47d-ctkgt"]
Nov 25 09:49:31 crc kubenswrapper[4769]: I1125 09:49:31.788246 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-db54df47d-ctkgt" event={"ID":"ca4f26bd-c24b-4250-967c-ff17491842a9","Type":"ContainerStarted","Data":"35b8b748a6e8d23443d51f372922c69de0edca4aeee65c728c6883bad94f76d5"}
Nov 25 09:49:33 crc kubenswrapper[4769]: I1125 09:49:33.814415 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-db54df47d-ctkgt" event={"ID":"ca4f26bd-c24b-4250-967c-ff17491842a9","Type":"ContainerStarted","Data":"7d3f08b1820c9f39a8f015eb6ee4b1e60cf458ca37be397c0c4dbcdfd68ec2b3"}
Nov 25 09:49:33 crc kubenswrapper[4769]: I1125 09:49:33.814900 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-db54df47d-ctkgt" event={"ID":"ca4f26bd-c24b-4250-967c-ff17491842a9","Type":"ContainerStarted","Data":"6e249efe483c501400920c0246fdf7cb3a1544dc45ec8545e644ae109b489fc4"}
Nov 25 09:49:33 crc kubenswrapper[4769]: I1125 09:49:33.833536 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/prometheus-operator-db54df47d-ctkgt" podStartSLOduration=1.667129276 podStartE2EDuration="3.833511423s" podCreationTimestamp="2025-11-25 09:49:30 +0000 UTC" firstStartedPulling="2025-11-25 09:49:31.237043473 +0000 UTC m=+319.822015786" lastFinishedPulling="2025-11-25 09:49:33.40342559 +0000 UTC m=+321.988397933" observedRunningTime="2025-11-25 09:49:33.830269285 +0000 UTC m=+322.415241598" watchObservedRunningTime="2025-11-25 09:49:33.833511423 +0000 UTC m=+322.418483736"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.056865 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/openshift-state-metrics-566fddb674-sjtvr"]
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.059072 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/openshift-state-metrics-566fddb674-sjtvr"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.061229 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"openshift-state-metrics-tls"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.061612 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/node-exporter-ql79l"]
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.062767 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/node-exporter-ql79l"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.064411 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"openshift-state-metrics-kube-rbac-proxy-config"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.065624 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"node-exporter-tls"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.065728 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"openshift-state-metrics-dockercfg-7gfdb"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.066092 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"node-exporter-dockercfg-78b9k"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.066097 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"node-exporter-kube-rbac-proxy-config"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.082456 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/openshift-state-metrics-566fddb674-sjtvr"]
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.094930 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/kube-state-metrics-777cb5bd5d-6m8kb"]
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.096183 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-6m8kb"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.101301 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-state-metrics-dockercfg-592h4"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.101520 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"kube-state-metrics-custom-resource-state-configmap"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.101570 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-state-metrics-tls"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.101698 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-state-metrics-kube-rbac-proxy-config"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.127357 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/kube-state-metrics-777cb5bd5d-6m8kb"]
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.168215 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/40f6ef09-afe3-4358-aaee-543fcc7bde48-metrics-client-ca\") pod \"node-exporter-ql79l\" (UID: \"40f6ef09-afe3-4358-aaee-543fcc7bde48\") " pod="openshift-monitoring/node-exporter-ql79l"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.168280 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-tls\" (UniqueName: \"kubernetes.io/secret/40f6ef09-afe3-4358-aaee-543fcc7bde48-node-exporter-tls\") pod \"node-exporter-ql79l\" (UID: \"40f6ef09-afe3-4358-aaee-543fcc7bde48\") " pod="openshift-monitoring/node-exporter-ql79l"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.168321 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/40f6ef09-afe3-4358-aaee-543fcc7bde48-node-exporter-kube-rbac-proxy-config\") pod \"node-exporter-ql79l\" (UID: \"40f6ef09-afe3-4358-aaee-543fcc7bde48\") " pod="openshift-monitoring/node-exporter-ql79l"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.168366 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/6c0c3ad6-cb5d-41d6-893f-c327db857d5c-metrics-client-ca\") pod \"openshift-state-metrics-566fddb674-sjtvr\" (UID: \"6c0c3ad6-cb5d-41d6-893f-c327db857d5c\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-sjtvr"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.168457 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/6c0c3ad6-cb5d-41d6-893f-c327db857d5c-openshift-state-metrics-tls\") pod \"openshift-state-metrics-566fddb674-sjtvr\" (UID: \"6c0c3ad6-cb5d-41d6-893f-c327db857d5c\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-sjtvr"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.168501 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tzgtx\" (UniqueName: \"kubernetes.io/projected/40f6ef09-afe3-4358-aaee-543fcc7bde48-kube-api-access-tzgtx\") pod \"node-exporter-ql79l\" (UID: \"40f6ef09-afe3-4358-aaee-543fcc7bde48\") " pod="openshift-monitoring/node-exporter-ql79l"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.168529 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-wtmp\" (UniqueName: \"kubernetes.io/host-path/40f6ef09-afe3-4358-aaee-543fcc7bde48-node-exporter-wtmp\") pod \"node-exporter-ql79l\" (UID: \"40f6ef09-afe3-4358-aaee-543fcc7bde48\") " pod="openshift-monitoring/node-exporter-ql79l"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.168687 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fdgdp\" (UniqueName: \"kubernetes.io/projected/6c0c3ad6-cb5d-41d6-893f-c327db857d5c-kube-api-access-fdgdp\") pod \"openshift-state-metrics-566fddb674-sjtvr\" (UID: \"6c0c3ad6-cb5d-41d6-893f-c327db857d5c\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-sjtvr"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.168822 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/6c0c3ad6-cb5d-41d6-893f-c327db857d5c-openshift-state-metrics-kube-rbac-proxy-config\") pod \"openshift-state-metrics-566fddb674-sjtvr\" (UID: \"6c0c3ad6-cb5d-41d6-893f-c327db857d5c\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-sjtvr"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.168897 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/40f6ef09-afe3-4358-aaee-543fcc7bde48-sys\") pod \"node-exporter-ql79l\" (UID: \"40f6ef09-afe3-4358-aaee-543fcc7bde48\") " pod="openshift-monitoring/node-exporter-ql79l"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.169020 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-textfile\" (UniqueName: \"kubernetes.io/empty-dir/40f6ef09-afe3-4358-aaee-543fcc7bde48-node-exporter-textfile\") pod \"node-exporter-ql79l\" (UID: \"40f6ef09-afe3-4358-aaee-543fcc7bde48\") " pod="openshift-monitoring/node-exporter-ql79l"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.169073 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"root\" (UniqueName: \"kubernetes.io/host-path/40f6ef09-afe3-4358-aaee-543fcc7bde48-root\") pod \"node-exporter-ql79l\" (UID: \"40f6ef09-afe3-4358-aaee-543fcc7bde48\") " pod="openshift-monitoring/node-exporter-ql79l"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.270555 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/40f6ef09-afe3-4358-aaee-543fcc7bde48-sys\") pod \"node-exporter-ql79l\" (UID: \"40f6ef09-afe3-4358-aaee-543fcc7bde48\") " pod="openshift-monitoring/node-exporter-ql79l"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.270634 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-custom-resource-state-configmap\" (UniqueName: \"kubernetes.io/configmap/93530422-1b32-43b9-bf1f-664692acf0ad-kube-state-metrics-custom-resource-state-configmap\") pod \"kube-state-metrics-777cb5bd5d-6m8kb\" (UID: \"93530422-1b32-43b9-bf1f-664692acf0ad\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-6m8kb"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.270680 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-textfile\" (UniqueName: \"kubernetes.io/empty-dir/40f6ef09-afe3-4358-aaee-543fcc7bde48-node-exporter-textfile\") pod \"node-exporter-ql79l\" (UID: \"40f6ef09-afe3-4358-aaee-543fcc7bde48\") " pod="openshift-monitoring/node-exporter-ql79l"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.270704 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/93530422-1b32-43b9-bf1f-664692acf0ad-kube-state-metrics-tls\") pod \"kube-state-metrics-777cb5bd5d-6m8kb\" (UID: \"93530422-1b32-43b9-bf1f-664692acf0ad\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-6m8kb"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.270724 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"root\" (UniqueName: \"kubernetes.io/host-path/40f6ef09-afe3-4358-aaee-543fcc7bde48-root\") pod \"node-exporter-ql79l\" (UID: \"40f6ef09-afe3-4358-aaee-543fcc7bde48\") " pod="openshift-monitoring/node-exporter-ql79l"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.270726 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/40f6ef09-afe3-4358-aaee-543fcc7bde48-sys\") pod \"node-exporter-ql79l\" (UID: \"40f6ef09-afe3-4358-aaee-543fcc7bde48\") " pod="openshift-monitoring/node-exporter-ql79l"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.270748 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/40f6ef09-afe3-4358-aaee-543fcc7bde48-metrics-client-ca\") pod \"node-exporter-ql79l\" (UID: \"40f6ef09-afe3-4358-aaee-543fcc7bde48\") " pod="openshift-monitoring/node-exporter-ql79l"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.270984 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"root\" (UniqueName: \"kubernetes.io/host-path/40f6ef09-afe3-4358-aaee-543fcc7bde48-root\") pod \"node-exporter-ql79l\" (UID: \"40f6ef09-afe3-4358-aaee-543fcc7bde48\") " pod="openshift-monitoring/node-exporter-ql79l"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.271006 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/93530422-1b32-43b9-bf1f-664692acf0ad-metrics-client-ca\") pod \"kube-state-metrics-777cb5bd5d-6m8kb\" (UID: \"93530422-1b32-43b9-bf1f-664692acf0ad\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-6m8kb"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.271110 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-tls\" (UniqueName: \"kubernetes.io/secret/40f6ef09-afe3-4358-aaee-543fcc7bde48-node-exporter-tls\") pod \"node-exporter-ql79l\" (UID: \"40f6ef09-afe3-4358-aaee-543fcc7bde48\") " pod="openshift-monitoring/node-exporter-ql79l"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.271160 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x7cfl\" (UniqueName: \"kubernetes.io/projected/93530422-1b32-43b9-bf1f-664692acf0ad-kube-api-access-x7cfl\") pod \"kube-state-metrics-777cb5bd5d-6m8kb\" (UID: \"93530422-1b32-43b9-bf1f-664692acf0ad\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-6m8kb"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.271203 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/40f6ef09-afe3-4358-aaee-543fcc7bde48-node-exporter-kube-rbac-proxy-config\") pod \"node-exporter-ql79l\" (UID: \"40f6ef09-afe3-4358-aaee-543fcc7bde48\") " pod="openshift-monitoring/node-exporter-ql79l"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.271268 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/6c0c3ad6-cb5d-41d6-893f-c327db857d5c-metrics-client-ca\") pod \"openshift-state-metrics-566fddb674-sjtvr\" (UID: \"6c0c3ad6-cb5d-41d6-893f-c327db857d5c\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-sjtvr"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.271309 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/6c0c3ad6-cb5d-41d6-893f-c327db857d5c-openshift-state-metrics-tls\") pod \"openshift-state-metrics-566fddb674-sjtvr\" (UID: \"6c0c3ad6-cb5d-41d6-893f-c327db857d5c\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-sjtvr"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.271333 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/93530422-1b32-43b9-bf1f-664692acf0ad-kube-state-metrics-kube-rbac-proxy-config\") pod \"kube-state-metrics-777cb5bd5d-6m8kb\" (UID: \"93530422-1b32-43b9-bf1f-664692acf0ad\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-6m8kb"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.271379 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tzgtx\" (UniqueName: \"kubernetes.io/projected/40f6ef09-afe3-4358-aaee-543fcc7bde48-kube-api-access-tzgtx\") pod \"node-exporter-ql79l\" (UID: \"40f6ef09-afe3-4358-aaee-543fcc7bde48\") " pod="openshift-monitoring/node-exporter-ql79l"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.271414 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-wtmp\" (UniqueName: \"kubernetes.io/host-path/40f6ef09-afe3-4358-aaee-543fcc7bde48-node-exporter-wtmp\") pod \"node-exporter-ql79l\" (UID: \"40f6ef09-afe3-4358-aaee-543fcc7bde48\") " pod="openshift-monitoring/node-exporter-ql79l"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.271462 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fdgdp\" (UniqueName: \"kubernetes.io/projected/6c0c3ad6-cb5d-41d6-893f-c327db857d5c-kube-api-access-fdgdp\") pod \"openshift-state-metrics-566fddb674-sjtvr\" (UID: \"6c0c3ad6-cb5d-41d6-893f-c327db857d5c\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-sjtvr"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.271497 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"volume-directive-shadow\" (UniqueName: \"kubernetes.io/empty-dir/93530422-1b32-43b9-bf1f-664692acf0ad-volume-directive-shadow\") pod \"kube-state-metrics-777cb5bd5d-6m8kb\" (UID: \"93530422-1b32-43b9-bf1f-664692acf0ad\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-6m8kb"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.271585 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/6c0c3ad6-cb5d-41d6-893f-c327db857d5c-openshift-state-metrics-kube-rbac-proxy-config\") pod \"openshift-state-metrics-566fddb674-sjtvr\" (UID: \"6c0c3ad6-cb5d-41d6-893f-c327db857d5c\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-sjtvr"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.271748 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-textfile\" (UniqueName: \"kubernetes.io/empty-dir/40f6ef09-afe3-4358-aaee-543fcc7bde48-node-exporter-textfile\") pod \"node-exporter-ql79l\" (UID: \"40f6ef09-afe3-4358-aaee-543fcc7bde48\") " pod="openshift-monitoring/node-exporter-ql79l"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.272187 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-wtmp\" (UniqueName: \"kubernetes.io/host-path/40f6ef09-afe3-4358-aaee-543fcc7bde48-node-exporter-wtmp\") pod \"node-exporter-ql79l\" (UID: \"40f6ef09-afe3-4358-aaee-543fcc7bde48\") " pod="openshift-monitoring/node-exporter-ql79l"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.272318 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/40f6ef09-afe3-4358-aaee-543fcc7bde48-metrics-client-ca\") pod \"node-exporter-ql79l\" (UID: \"40f6ef09-afe3-4358-aaee-543fcc7bde48\") " pod="openshift-monitoring/node-exporter-ql79l"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.273142 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/6c0c3ad6-cb5d-41d6-893f-c327db857d5c-metrics-client-ca\") pod \"openshift-state-metrics-566fddb674-sjtvr\" (UID: \"6c0c3ad6-cb5d-41d6-893f-c327db857d5c\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-sjtvr"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.282644 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/6c0c3ad6-cb5d-41d6-893f-c327db857d5c-openshift-state-metrics-kube-rbac-proxy-config\") pod \"openshift-state-metrics-566fddb674-sjtvr\" (UID: \"6c0c3ad6-cb5d-41d6-893f-c327db857d5c\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-sjtvr"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.290685 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/40f6ef09-afe3-4358-aaee-543fcc7bde48-node-exporter-kube-rbac-proxy-config\") pod \"node-exporter-ql79l\" (UID: \"40f6ef09-afe3-4358-aaee-543fcc7bde48\") " pod="openshift-monitoring/node-exporter-ql79l"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.291744 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/6c0c3ad6-cb5d-41d6-893f-c327db857d5c-openshift-state-metrics-tls\") pod \"openshift-state-metrics-566fddb674-sjtvr\" (UID: \"6c0c3ad6-cb5d-41d6-893f-c327db857d5c\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-sjtvr"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.297603 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-tls\" (UniqueName: \"kubernetes.io/secret/40f6ef09-afe3-4358-aaee-543fcc7bde48-node-exporter-tls\") pod \"node-exporter-ql79l\" (UID: \"40f6ef09-afe3-4358-aaee-543fcc7bde48\") " pod="openshift-monitoring/node-exporter-ql79l"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.301606 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tzgtx\" (UniqueName: \"kubernetes.io/projected/40f6ef09-afe3-4358-aaee-543fcc7bde48-kube-api-access-tzgtx\") pod \"node-exporter-ql79l\" (UID: \"40f6ef09-afe3-4358-aaee-543fcc7bde48\") " pod="openshift-monitoring/node-exporter-ql79l"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.358053 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fdgdp\" (UniqueName: \"kubernetes.io/projected/6c0c3ad6-cb5d-41d6-893f-c327db857d5c-kube-api-access-fdgdp\") pod \"openshift-state-metrics-566fddb674-sjtvr\" (UID: \"6c0c3ad6-cb5d-41d6-893f-c327db857d5c\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-sjtvr"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.375001 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x7cfl\" (UniqueName: \"kubernetes.io/projected/93530422-1b32-43b9-bf1f-664692acf0ad-kube-api-access-x7cfl\") pod \"kube-state-metrics-777cb5bd5d-6m8kb\" (UID: \"93530422-1b32-43b9-bf1f-664692acf0ad\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-6m8kb"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.375075 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/93530422-1b32-43b9-bf1f-664692acf0ad-kube-state-metrics-kube-rbac-proxy-config\") pod \"kube-state-metrics-777cb5bd5d-6m8kb\" (UID: \"93530422-1b32-43b9-bf1f-664692acf0ad\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-6m8kb"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.375108 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"volume-directive-shadow\" (UniqueName: \"kubernetes.io/empty-dir/93530422-1b32-43b9-bf1f-664692acf0ad-volume-directive-shadow\") pod \"kube-state-metrics-777cb5bd5d-6m8kb\" (UID: \"93530422-1b32-43b9-bf1f-664692acf0ad\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-6m8kb"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.375149 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-custom-resource-state-configmap\" (UniqueName: \"kubernetes.io/configmap/93530422-1b32-43b9-bf1f-664692acf0ad-kube-state-metrics-custom-resource-state-configmap\") pod \"kube-state-metrics-777cb5bd5d-6m8kb\" (UID: \"93530422-1b32-43b9-bf1f-664692acf0ad\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-6m8kb"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.375182 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/93530422-1b32-43b9-bf1f-664692acf0ad-kube-state-metrics-tls\") pod \"kube-state-metrics-777cb5bd5d-6m8kb\" (UID: \"93530422-1b32-43b9-bf1f-664692acf0ad\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-6m8kb"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.375214 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/93530422-1b32-43b9-bf1f-664692acf0ad-metrics-client-ca\") pod \"kube-state-metrics-777cb5bd5d-6m8kb\" (UID: \"93530422-1b32-43b9-bf1f-664692acf0ad\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-6m8kb"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.376314 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/93530422-1b32-43b9-bf1f-664692acf0ad-metrics-client-ca\") pod \"kube-state-metrics-777cb5bd5d-6m8kb\" (UID: \"93530422-1b32-43b9-bf1f-664692acf0ad\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-6m8kb"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.380047 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-custom-resource-state-configmap\" (UniqueName: \"kubernetes.io/configmap/93530422-1b32-43b9-bf1f-664692acf0ad-kube-state-metrics-custom-resource-state-configmap\") pod \"kube-state-metrics-777cb5bd5d-6m8kb\" (UID: \"93530422-1b32-43b9-bf1f-664692acf0ad\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-6m8kb"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.380413 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"volume-directive-shadow\" (UniqueName: \"kubernetes.io/empty-dir/93530422-1b32-43b9-bf1f-664692acf0ad-volume-directive-shadow\") pod \"kube-state-metrics-777cb5bd5d-6m8kb\" (UID: \"93530422-1b32-43b9-bf1f-664692acf0ad\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-6m8kb"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.381295 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/93530422-1b32-43b9-bf1f-664692acf0ad-kube-state-metrics-kube-rbac-proxy-config\") pod \"kube-state-metrics-777cb5bd5d-6m8kb\" (UID: \"93530422-1b32-43b9-bf1f-664692acf0ad\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-6m8kb"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.383660 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/openshift-state-metrics-566fddb674-sjtvr"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.387857 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/93530422-1b32-43b9-bf1f-664692acf0ad-kube-state-metrics-tls\") pod \"kube-state-metrics-777cb5bd5d-6m8kb\" (UID: \"93530422-1b32-43b9-bf1f-664692acf0ad\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-6m8kb"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.392823 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/node-exporter-ql79l"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.411446 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x7cfl\" (UniqueName: \"kubernetes.io/projected/93530422-1b32-43b9-bf1f-664692acf0ad-kube-api-access-x7cfl\") pod \"kube-state-metrics-777cb5bd5d-6m8kb\" (UID: \"93530422-1b32-43b9-bf1f-664692acf0ad\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-6m8kb"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.416282 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-6m8kb"
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.688864 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/openshift-state-metrics-566fddb674-sjtvr"]
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.830467 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-ql79l" event={"ID":"40f6ef09-afe3-4358-aaee-543fcc7bde48","Type":"ContainerStarted","Data":"6b2221968ab6ef6cdaa00578b3145bc4ad565bb99d38897bef9e6db5b44f8095"}
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.831823 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-sjtvr" event={"ID":"6c0c3ad6-cb5d-41d6-893f-c327db857d5c","Type":"ContainerStarted","Data":"26fcb36e616b0d87cb1868ed99857ec25aedf49e0ae83414e9971cb9b38a882c"}
Nov 25 09:49:36 crc kubenswrapper[4769]: I1125 09:49:36.938727 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/kube-state-metrics-777cb5bd5d-6m8kb"]
Nov 25 09:49:36 crc kubenswrapper[4769]: W1125 09:49:36.945041 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod93530422_1b32_43b9_bf1f_664692acf0ad.slice/crio-ae4f413c7be2506056572d27d586716ffd2c16d4204b071e0c20f27a37a6323a WatchSource:0}: Error finding container ae4f413c7be2506056572d27d586716ffd2c16d4204b071e0c20f27a37a6323a: Status 404 returned error can't find the container with id ae4f413c7be2506056572d27d586716ffd2c16d4204b071e0c20f27a37a6323a
Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.180568 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/alertmanager-main-0"]
Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.182907 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/alertmanager-main-0"
Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.185625 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-tls"
Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.186058 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-kube-rbac-proxy-metric"
Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.186069 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-web-config"
Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.186245 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-tls-assets-0"
Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.187118 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-kube-rbac-proxy"
Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.187341 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-dockercfg-kz8wz"
Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.187547 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-generated"
Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.190986 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-kube-rbac-proxy-web"
Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.199880 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"alertmanager-trusted-ca-bundle"
Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.205382 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/alertmanager-main-0"]
Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.291928 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/72737271-19cc-44d8-b3f0-f668c1fe75d9-secret-alertmanager-kube-rbac-proxy-web\") pod \"alertmanager-main-0\" (UID: \"72737271-19cc-44d8-b3f0-f668c1fe75d9\") " pod="openshift-monitoring/alertmanager-main-0"
Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.292009 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"alertmanager-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/72737271-19cc-44d8-b3f0-f668c1fe75d9-alertmanager-trusted-ca-bundle\") pod \"alertmanager-main-0\" (UID: \"72737271-19cc-44d8-b3f0-f668c1fe75d9\") " pod="openshift-monitoring/alertmanager-main-0"
Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.292054 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-main-tls\" (UniqueName: \"kubernetes.io/secret/72737271-19cc-44d8-b3f0-f668c1fe75d9-secret-alertmanager-main-tls\") pod \"alertmanager-main-0\" (UID: \"72737271-19cc-44d8-b3f0-f668c1fe75d9\") " pod="openshift-monitoring/alertmanager-main-0"
Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.292079 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/72737271-19cc-44d8-b3f0-f668c1fe75d9-config-volume\") pod \"alertmanager-main-0\" (UID: \"72737271-19cc-44d8-b3f0-f668c1fe75d9\") " pod="openshift-monitoring/alertmanager-main-0"
Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.292101 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/72737271-19cc-44d8-b3f0-f668c1fe75d9-tls-assets\") pod \"alertmanager-main-0\" (UID: \"72737271-19cc-44d8-b3f0-f668c1fe75d9\") " pod="openshift-monitoring/alertmanager-main-0"
Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.292126 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/72737271-19cc-44d8-b3f0-f668c1fe75d9-config-out\") pod \"alertmanager-main-0\" (UID: \"72737271-19cc-44d8-b3f0-f668c1fe75d9\") " pod="openshift-monitoring/alertmanager-main-0"
Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.292152 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/72737271-19cc-44d8-b3f0-f668c1fe75d9-metrics-client-ca\") pod \"alertmanager-main-0\" (UID: \"72737271-19cc-44d8-b3f0-f668c1fe75d9\") " pod="openshift-monitoring/alertmanager-main-0"
Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.292436 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jqf9v\" (UniqueName: \"kubernetes.io/projected/72737271-19cc-44d8-b3f0-f668c1fe75d9-kube-api-access-jqf9v\") pod \"alertmanager-main-0\" (UID: \"72737271-19cc-44d8-b3f0-f668c1fe75d9\") " pod="openshift-monitoring/alertmanager-main-0"
Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.292465 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"alertmanager-main-db\" (UniqueName: \"kubernetes.io/empty-dir/72737271-19cc-44d8-b3f0-f668c1fe75d9-alertmanager-main-db\") pod \"alertmanager-main-0\" (UID: \"72737271-19cc-44d8-b3f0-f668c1fe75d9\") " pod="openshift-monitoring/alertmanager-main-0"
Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.292495 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/72737271-19cc-44d8-b3f0-f668c1fe75d9-web-config\") pod \"alertmanager-main-0\" (UID: \"72737271-19cc-44d8-b3f0-f668c1fe75d9\") " pod="openshift-monitoring/alertmanager-main-0"
Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.292545 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-kube-rbac-proxy-metric\" (UniqueName: \"kubernetes.io/secret/72737271-19cc-44d8-b3f0-f668c1fe75d9-secret-alertmanager-kube-rbac-proxy-metric\") pod \"alertmanager-main-0\" (UID: \"72737271-19cc-44d8-b3f0-f668c1fe75d9\") " pod="openshift-monitoring/alertmanager-main-0"
Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.292703 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/72737271-19cc-44d8-b3f0-f668c1fe75d9-secret-alertmanager-kube-rbac-proxy\") pod \"alertmanager-main-0\" (UID: \"72737271-19cc-44d8-b3f0-f668c1fe75d9\") " pod="openshift-monitoring/alertmanager-main-0"
Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.394812 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-jqf9v\" (UniqueName: \"kubernetes.io/projected/72737271-19cc-44d8-b3f0-f668c1fe75d9-kube-api-access-jqf9v\") pod \"alertmanager-main-0\" (UID: \"72737271-19cc-44d8-b3f0-f668c1fe75d9\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.394877 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"alertmanager-main-db\" (UniqueName: \"kubernetes.io/empty-dir/72737271-19cc-44d8-b3f0-f668c1fe75d9-alertmanager-main-db\") pod \"alertmanager-main-0\" (UID: \"72737271-19cc-44d8-b3f0-f668c1fe75d9\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.394904 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/72737271-19cc-44d8-b3f0-f668c1fe75d9-web-config\") pod \"alertmanager-main-0\" (UID: \"72737271-19cc-44d8-b3f0-f668c1fe75d9\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.394940 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-kube-rbac-proxy-metric\" (UniqueName: \"kubernetes.io/secret/72737271-19cc-44d8-b3f0-f668c1fe75d9-secret-alertmanager-kube-rbac-proxy-metric\") pod \"alertmanager-main-0\" (UID: \"72737271-19cc-44d8-b3f0-f668c1fe75d9\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.395008 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/72737271-19cc-44d8-b3f0-f668c1fe75d9-secret-alertmanager-kube-rbac-proxy\") pod \"alertmanager-main-0\" (UID: \"72737271-19cc-44d8-b3f0-f668c1fe75d9\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.395055 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/72737271-19cc-44d8-b3f0-f668c1fe75d9-secret-alertmanager-kube-rbac-proxy-web\") pod \"alertmanager-main-0\" (UID: \"72737271-19cc-44d8-b3f0-f668c1fe75d9\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.395082 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"alertmanager-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/72737271-19cc-44d8-b3f0-f668c1fe75d9-alertmanager-trusted-ca-bundle\") pod \"alertmanager-main-0\" (UID: \"72737271-19cc-44d8-b3f0-f668c1fe75d9\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.395115 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-main-tls\" (UniqueName: \"kubernetes.io/secret/72737271-19cc-44d8-b3f0-f668c1fe75d9-secret-alertmanager-main-tls\") pod \"alertmanager-main-0\" (UID: \"72737271-19cc-44d8-b3f0-f668c1fe75d9\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.395135 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/72737271-19cc-44d8-b3f0-f668c1fe75d9-config-volume\") pod \"alertmanager-main-0\" (UID: \"72737271-19cc-44d8-b3f0-f668c1fe75d9\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 
09:49:37.395156 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/72737271-19cc-44d8-b3f0-f668c1fe75d9-tls-assets\") pod \"alertmanager-main-0\" (UID: \"72737271-19cc-44d8-b3f0-f668c1fe75d9\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.395178 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/72737271-19cc-44d8-b3f0-f668c1fe75d9-config-out\") pod \"alertmanager-main-0\" (UID: \"72737271-19cc-44d8-b3f0-f668c1fe75d9\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.395194 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/72737271-19cc-44d8-b3f0-f668c1fe75d9-metrics-client-ca\") pod \"alertmanager-main-0\" (UID: \"72737271-19cc-44d8-b3f0-f668c1fe75d9\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.396432 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"alertmanager-main-db\" (UniqueName: \"kubernetes.io/empty-dir/72737271-19cc-44d8-b3f0-f668c1fe75d9-alertmanager-main-db\") pod \"alertmanager-main-0\" (UID: \"72737271-19cc-44d8-b3f0-f668c1fe75d9\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.396577 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/72737271-19cc-44d8-b3f0-f668c1fe75d9-metrics-client-ca\") pod \"alertmanager-main-0\" (UID: \"72737271-19cc-44d8-b3f0-f668c1fe75d9\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.402296 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/72737271-19cc-44d8-b3f0-f668c1fe75d9-config-out\") pod \"alertmanager-main-0\" (UID: \"72737271-19cc-44d8-b3f0-f668c1fe75d9\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.403739 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/72737271-19cc-44d8-b3f0-f668c1fe75d9-tls-assets\") pod \"alertmanager-main-0\" (UID: \"72737271-19cc-44d8-b3f0-f668c1fe75d9\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.404680 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-alertmanager-main-tls\" (UniqueName: \"kubernetes.io/secret/72737271-19cc-44d8-b3f0-f668c1fe75d9-secret-alertmanager-main-tls\") pod \"alertmanager-main-0\" (UID: \"72737271-19cc-44d8-b3f0-f668c1fe75d9\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.405120 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"alertmanager-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/72737271-19cc-44d8-b3f0-f668c1fe75d9-alertmanager-trusted-ca-bundle\") pod \"alertmanager-main-0\" (UID: \"72737271-19cc-44d8-b3f0-f668c1fe75d9\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.410692 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"secret-alertmanager-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/72737271-19cc-44d8-b3f0-f668c1fe75d9-secret-alertmanager-kube-rbac-proxy-web\") pod \"alertmanager-main-0\" (UID: \"72737271-19cc-44d8-b3f0-f668c1fe75d9\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.410702 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-alertmanager-kube-rbac-proxy-metric\" (UniqueName: \"kubernetes.io/secret/72737271-19cc-44d8-b3f0-f668c1fe75d9-secret-alertmanager-kube-rbac-proxy-metric\") pod \"alertmanager-main-0\" (UID: \"72737271-19cc-44d8-b3f0-f668c1fe75d9\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.410710 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/72737271-19cc-44d8-b3f0-f668c1fe75d9-web-config\") pod \"alertmanager-main-0\" (UID: \"72737271-19cc-44d8-b3f0-f668c1fe75d9\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.411276 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-alertmanager-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/72737271-19cc-44d8-b3f0-f668c1fe75d9-secret-alertmanager-kube-rbac-proxy\") pod \"alertmanager-main-0\" (UID: \"72737271-19cc-44d8-b3f0-f668c1fe75d9\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.417578 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/72737271-19cc-44d8-b3f0-f668c1fe75d9-config-volume\") pod \"alertmanager-main-0\" (UID: \"72737271-19cc-44d8-b3f0-f668c1fe75d9\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.418525 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jqf9v\" (UniqueName: \"kubernetes.io/projected/72737271-19cc-44d8-b3f0-f668c1fe75d9-kube-api-access-jqf9v\") pod \"alertmanager-main-0\" (UID: \"72737271-19cc-44d8-b3f0-f668c1fe75d9\") " pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.511629 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/alertmanager-main-0" Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.842348 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-sjtvr" event={"ID":"6c0c3ad6-cb5d-41d6-893f-c327db857d5c","Type":"ContainerStarted","Data":"d6e916d85f4856128490bca1497b8ad916edeb59cfcbad87ca95f256f9387e7b"} Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.842851 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-sjtvr" event={"ID":"6c0c3ad6-cb5d-41d6-893f-c327db857d5c","Type":"ContainerStarted","Data":"b099c50652e19ca2adb516b15c145ea540c050758458ed96a362fca48e18d9b3"} Nov 25 09:49:37 crc kubenswrapper[4769]: I1125 09:49:37.844154 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-6m8kb" event={"ID":"93530422-1b32-43b9-bf1f-664692acf0ad","Type":"ContainerStarted","Data":"ae4f413c7be2506056572d27d586716ffd2c16d4204b071e0c20f27a37a6323a"} Nov 25 09:49:38 crc kubenswrapper[4769]: I1125 09:49:38.144747 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/thanos-querier-d49b95667-frlcl"] Nov 25 09:49:38 crc kubenswrapper[4769]: I1125 09:49:38.147643 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/thanos-querier-d49b95667-frlcl" Nov 25 09:49:38 crc kubenswrapper[4769]: I1125 09:49:38.150161 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-rules" Nov 25 09:49:38 crc kubenswrapper[4769]: I1125 09:49:38.150162 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-web" Nov 25 09:49:38 crc kubenswrapper[4769]: I1125 09:49:38.150463 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-grpc-tls-4n0884lam7386" Nov 25 09:49:38 crc kubenswrapper[4769]: I1125 09:49:38.150500 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-dockercfg-z2l4v" Nov 25 09:49:38 crc kubenswrapper[4769]: I1125 09:49:38.152509 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-metrics" Nov 25 09:49:38 crc kubenswrapper[4769]: I1125 09:49:38.152769 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-tls" Nov 25 09:49:38 crc kubenswrapper[4769]: I1125 09:49:38.152982 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy" Nov 25 09:49:38 crc kubenswrapper[4769]: I1125 09:49:38.169661 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/thanos-querier-d49b95667-frlcl"] Nov 25 09:49:38 crc kubenswrapper[4769]: I1125 09:49:38.246563 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/alertmanager-main-0"] Nov 25 09:49:38 crc kubenswrapper[4769]: I1125 09:49:38.313105 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6xqpw\" (UniqueName: \"kubernetes.io/projected/ba054406-d227-497a-ab7f-fd4e85955cbf-kube-api-access-6xqpw\") pod \"thanos-querier-d49b95667-frlcl\" (UID: \"ba054406-d227-497a-ab7f-fd4e85955cbf\") " pod="openshift-monitoring/thanos-querier-d49b95667-frlcl" Nov 25 
09:49:38 crc kubenswrapper[4769]: I1125 09:49:38.313445 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/ba054406-d227-497a-ab7f-fd4e85955cbf-secret-thanos-querier-kube-rbac-proxy\") pod \"thanos-querier-d49b95667-frlcl\" (UID: \"ba054406-d227-497a-ab7f-fd4e85955cbf\") " pod="openshift-monitoring/thanos-querier-d49b95667-frlcl" Nov 25 09:49:38 crc kubenswrapper[4769]: I1125 09:49:38.313543 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-rules\" (UniqueName: \"kubernetes.io/secret/ba054406-d227-497a-ab7f-fd4e85955cbf-secret-thanos-querier-kube-rbac-proxy-rules\") pod \"thanos-querier-d49b95667-frlcl\" (UID: \"ba054406-d227-497a-ab7f-fd4e85955cbf\") " pod="openshift-monitoring/thanos-querier-d49b95667-frlcl" Nov 25 09:49:38 crc kubenswrapper[4769]: I1125 09:49:38.313759 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/ba054406-d227-497a-ab7f-fd4e85955cbf-metrics-client-ca\") pod \"thanos-querier-d49b95667-frlcl\" (UID: \"ba054406-d227-497a-ab7f-fd4e85955cbf\") " pod="openshift-monitoring/thanos-querier-d49b95667-frlcl" Nov 25 09:49:38 crc kubenswrapper[4769]: I1125 09:49:38.313805 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/ba054406-d227-497a-ab7f-fd4e85955cbf-secret-grpc-tls\") pod \"thanos-querier-d49b95667-frlcl\" (UID: \"ba054406-d227-497a-ab7f-fd4e85955cbf\") " pod="openshift-monitoring/thanos-querier-d49b95667-frlcl" Nov 25 09:49:38 crc kubenswrapper[4769]: I1125 09:49:38.313924 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-tls\" (UniqueName: \"kubernetes.io/secret/ba054406-d227-497a-ab7f-fd4e85955cbf-secret-thanos-querier-tls\") pod \"thanos-querier-d49b95667-frlcl\" (UID: \"ba054406-d227-497a-ab7f-fd4e85955cbf\") " pod="openshift-monitoring/thanos-querier-d49b95667-frlcl" Nov 25 09:49:38 crc kubenswrapper[4769]: I1125 09:49:38.313956 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-metrics\" (UniqueName: \"kubernetes.io/secret/ba054406-d227-497a-ab7f-fd4e85955cbf-secret-thanos-querier-kube-rbac-proxy-metrics\") pod \"thanos-querier-d49b95667-frlcl\" (UID: \"ba054406-d227-497a-ab7f-fd4e85955cbf\") " pod="openshift-monitoring/thanos-querier-d49b95667-frlcl" Nov 25 09:49:38 crc kubenswrapper[4769]: I1125 09:49:38.314109 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/ba054406-d227-497a-ab7f-fd4e85955cbf-secret-thanos-querier-kube-rbac-proxy-web\") pod \"thanos-querier-d49b95667-frlcl\" (UID: \"ba054406-d227-497a-ab7f-fd4e85955cbf\") " pod="openshift-monitoring/thanos-querier-d49b95667-frlcl" Nov 25 09:49:38 crc kubenswrapper[4769]: I1125 09:49:38.416276 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/ba054406-d227-497a-ab7f-fd4e85955cbf-metrics-client-ca\") pod \"thanos-querier-d49b95667-frlcl\" (UID: \"ba054406-d227-497a-ab7f-fd4e85955cbf\") " 
pod="openshift-monitoring/thanos-querier-d49b95667-frlcl" Nov 25 09:49:38 crc kubenswrapper[4769]: I1125 09:49:38.416346 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/ba054406-d227-497a-ab7f-fd4e85955cbf-secret-grpc-tls\") pod \"thanos-querier-d49b95667-frlcl\" (UID: \"ba054406-d227-497a-ab7f-fd4e85955cbf\") " pod="openshift-monitoring/thanos-querier-d49b95667-frlcl" Nov 25 09:49:38 crc kubenswrapper[4769]: I1125 09:49:38.416451 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-tls\" (UniqueName: \"kubernetes.io/secret/ba054406-d227-497a-ab7f-fd4e85955cbf-secret-thanos-querier-tls\") pod \"thanos-querier-d49b95667-frlcl\" (UID: \"ba054406-d227-497a-ab7f-fd4e85955cbf\") " pod="openshift-monitoring/thanos-querier-d49b95667-frlcl" Nov 25 09:49:38 crc kubenswrapper[4769]: I1125 09:49:38.417260 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/ba054406-d227-497a-ab7f-fd4e85955cbf-metrics-client-ca\") pod \"thanos-querier-d49b95667-frlcl\" (UID: \"ba054406-d227-497a-ab7f-fd4e85955cbf\") " pod="openshift-monitoring/thanos-querier-d49b95667-frlcl" Nov 25 09:49:38 crc kubenswrapper[4769]: I1125 09:49:38.416486 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-metrics\" (UniqueName: \"kubernetes.io/secret/ba054406-d227-497a-ab7f-fd4e85955cbf-secret-thanos-querier-kube-rbac-proxy-metrics\") pod \"thanos-querier-d49b95667-frlcl\" (UID: \"ba054406-d227-497a-ab7f-fd4e85955cbf\") " pod="openshift-monitoring/thanos-querier-d49b95667-frlcl" Nov 25 09:49:38 crc kubenswrapper[4769]: I1125 09:49:38.417738 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/ba054406-d227-497a-ab7f-fd4e85955cbf-secret-thanos-querier-kube-rbac-proxy-web\") pod \"thanos-querier-d49b95667-frlcl\" (UID: \"ba054406-d227-497a-ab7f-fd4e85955cbf\") " pod="openshift-monitoring/thanos-querier-d49b95667-frlcl" Nov 25 09:49:38 crc kubenswrapper[4769]: I1125 09:49:38.417801 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6xqpw\" (UniqueName: \"kubernetes.io/projected/ba054406-d227-497a-ab7f-fd4e85955cbf-kube-api-access-6xqpw\") pod \"thanos-querier-d49b95667-frlcl\" (UID: \"ba054406-d227-497a-ab7f-fd4e85955cbf\") " pod="openshift-monitoring/thanos-querier-d49b95667-frlcl" Nov 25 09:49:38 crc kubenswrapper[4769]: I1125 09:49:38.417847 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/ba054406-d227-497a-ab7f-fd4e85955cbf-secret-thanos-querier-kube-rbac-proxy\") pod \"thanos-querier-d49b95667-frlcl\" (UID: \"ba054406-d227-497a-ab7f-fd4e85955cbf\") " pod="openshift-monitoring/thanos-querier-d49b95667-frlcl" Nov 25 09:49:38 crc kubenswrapper[4769]: I1125 09:49:38.417892 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-rules\" (UniqueName: \"kubernetes.io/secret/ba054406-d227-497a-ab7f-fd4e85955cbf-secret-thanos-querier-kube-rbac-proxy-rules\") pod \"thanos-querier-d49b95667-frlcl\" (UID: \"ba054406-d227-497a-ab7f-fd4e85955cbf\") " pod="openshift-monitoring/thanos-querier-d49b95667-frlcl" Nov 25 09:49:38 crc 
kubenswrapper[4769]: I1125 09:49:38.424714 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy-metrics\" (UniqueName: \"kubernetes.io/secret/ba054406-d227-497a-ab7f-fd4e85955cbf-secret-thanos-querier-kube-rbac-proxy-metrics\") pod \"thanos-querier-d49b95667-frlcl\" (UID: \"ba054406-d227-497a-ab7f-fd4e85955cbf\") " pod="openshift-monitoring/thanos-querier-d49b95667-frlcl" Nov 25 09:49:38 crc kubenswrapper[4769]: I1125 09:49:38.425833 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/ba054406-d227-497a-ab7f-fd4e85955cbf-secret-thanos-querier-kube-rbac-proxy-web\") pod \"thanos-querier-d49b95667-frlcl\" (UID: \"ba054406-d227-497a-ab7f-fd4e85955cbf\") " pod="openshift-monitoring/thanos-querier-d49b95667-frlcl" Nov 25 09:49:38 crc kubenswrapper[4769]: I1125 09:49:38.425840 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy-rules\" (UniqueName: \"kubernetes.io/secret/ba054406-d227-497a-ab7f-fd4e85955cbf-secret-thanos-querier-kube-rbac-proxy-rules\") pod \"thanos-querier-d49b95667-frlcl\" (UID: \"ba054406-d227-497a-ab7f-fd4e85955cbf\") " pod="openshift-monitoring/thanos-querier-d49b95667-frlcl" Nov 25 09:49:38 crc kubenswrapper[4769]: I1125 09:49:38.426237 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-tls\" (UniqueName: \"kubernetes.io/secret/ba054406-d227-497a-ab7f-fd4e85955cbf-secret-thanos-querier-tls\") pod \"thanos-querier-d49b95667-frlcl\" (UID: \"ba054406-d227-497a-ab7f-fd4e85955cbf\") " pod="openshift-monitoring/thanos-querier-d49b95667-frlcl" Nov 25 09:49:38 crc kubenswrapper[4769]: I1125 09:49:38.428559 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/ba054406-d227-497a-ab7f-fd4e85955cbf-secret-thanos-querier-kube-rbac-proxy\") pod \"thanos-querier-d49b95667-frlcl\" (UID: \"ba054406-d227-497a-ab7f-fd4e85955cbf\") " pod="openshift-monitoring/thanos-querier-d49b95667-frlcl" Nov 25 09:49:38 crc kubenswrapper[4769]: I1125 09:49:38.431117 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/ba054406-d227-497a-ab7f-fd4e85955cbf-secret-grpc-tls\") pod \"thanos-querier-d49b95667-frlcl\" (UID: \"ba054406-d227-497a-ab7f-fd4e85955cbf\") " pod="openshift-monitoring/thanos-querier-d49b95667-frlcl" Nov 25 09:49:38 crc kubenswrapper[4769]: I1125 09:49:38.444258 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6xqpw\" (UniqueName: \"kubernetes.io/projected/ba054406-d227-497a-ab7f-fd4e85955cbf-kube-api-access-6xqpw\") pod \"thanos-querier-d49b95667-frlcl\" (UID: \"ba054406-d227-497a-ab7f-fd4e85955cbf\") " pod="openshift-monitoring/thanos-querier-d49b95667-frlcl" Nov 25 09:49:38 crc kubenswrapper[4769]: I1125 09:49:38.469233 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/thanos-querier-d49b95667-frlcl" Nov 25 09:49:38 crc kubenswrapper[4769]: I1125 09:49:38.859696 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"72737271-19cc-44d8-b3f0-f668c1fe75d9","Type":"ContainerStarted","Data":"5cff6501b8234dc8eca16cf62656e9a529ff95753762975cfca238ce80c1f701"} Nov 25 09:49:38 crc kubenswrapper[4769]: I1125 09:49:38.861548 4769 generic.go:334] "Generic (PLEG): container finished" podID="40f6ef09-afe3-4358-aaee-543fcc7bde48" containerID="18a464e599bd78c8fd7f6b88817540e81537d34f1a112f329ae0839519d6655b" exitCode=0 Nov 25 09:49:38 crc kubenswrapper[4769]: I1125 09:49:38.861612 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-ql79l" event={"ID":"40f6ef09-afe3-4358-aaee-543fcc7bde48","Type":"ContainerDied","Data":"18a464e599bd78c8fd7f6b88817540e81537d34f1a112f329ae0839519d6655b"} Nov 25 09:49:39 crc kubenswrapper[4769]: I1125 09:49:39.258689 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/thanos-querier-d49b95667-frlcl"] Nov 25 09:49:39 crc kubenswrapper[4769]: W1125 09:49:39.278342 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podba054406_d227_497a_ab7f_fd4e85955cbf.slice/crio-d469195bbb9e48d64fbfa6a4ca13b09320d8fe83ab8ec22ec45cb51ee8b5afd9 WatchSource:0}: Error finding container d469195bbb9e48d64fbfa6a4ca13b09320d8fe83ab8ec22ec45cb51ee8b5afd9: Status 404 returned error can't find the container with id d469195bbb9e48d64fbfa6a4ca13b09320d8fe83ab8ec22ec45cb51ee8b5afd9 Nov 25 09:49:39 crc kubenswrapper[4769]: I1125 09:49:39.870906 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-ql79l" event={"ID":"40f6ef09-afe3-4358-aaee-543fcc7bde48","Type":"ContainerStarted","Data":"a8e0f792ef064aa788d583cb9f75b23658d1d5e135f4fd7d9d5689b5dcd0a116"} Nov 25 09:49:39 crc kubenswrapper[4769]: I1125 09:49:39.870979 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-ql79l" event={"ID":"40f6ef09-afe3-4358-aaee-543fcc7bde48","Type":"ContainerStarted","Data":"a9fb5f62fd70996fa6cf772702b9f06cb5df8733b80d73957861ad51bc4a76b5"} Nov 25 09:49:39 crc kubenswrapper[4769]: I1125 09:49:39.873430 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-d49b95667-frlcl" event={"ID":"ba054406-d227-497a-ab7f-fd4e85955cbf","Type":"ContainerStarted","Data":"d469195bbb9e48d64fbfa6a4ca13b09320d8fe83ab8ec22ec45cb51ee8b5afd9"} Nov 25 09:49:39 crc kubenswrapper[4769]: I1125 09:49:39.875516 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-sjtvr" event={"ID":"6c0c3ad6-cb5d-41d6-893f-c327db857d5c","Type":"ContainerStarted","Data":"a6cef4afb50b82439f1b39774b5d5dfffa8320b28043cb31f7efc6b2efb095a1"} Nov 25 09:49:39 crc kubenswrapper[4769]: I1125 09:49:39.877620 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-6m8kb" event={"ID":"93530422-1b32-43b9-bf1f-664692acf0ad","Type":"ContainerStarted","Data":"4cd8d36daecb3a9f8f6425f3450dfbce4630c4e82ae6e01bdbbae3761e542ddd"} Nov 25 09:49:39 crc kubenswrapper[4769]: I1125 09:49:39.877653 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-6m8kb" 
event={"ID":"93530422-1b32-43b9-bf1f-664692acf0ad","Type":"ContainerStarted","Data":"b3bf5ce9048c702a19a68715b8ef42fef330dcac8599a2bc6674e5147ee1b3e1"} Nov 25 09:49:39 crc kubenswrapper[4769]: I1125 09:49:39.877666 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-6m8kb" event={"ID":"93530422-1b32-43b9-bf1f-664692acf0ad","Type":"ContainerStarted","Data":"483bd239301556c0a59875728acd0b3e8a4f44520f795f9516a78b6b2147bfc1"} Nov 25 09:49:39 crc kubenswrapper[4769]: I1125 09:49:39.907188 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/node-exporter-ql79l" podStartSLOduration=2.596788415 podStartE2EDuration="3.907163371s" podCreationTimestamp="2025-11-25 09:49:36 +0000 UTC" firstStartedPulling="2025-11-25 09:49:36.4477069 +0000 UTC m=+325.032679213" lastFinishedPulling="2025-11-25 09:49:37.758081856 +0000 UTC m=+326.343054169" observedRunningTime="2025-11-25 09:49:39.897269896 +0000 UTC m=+328.482242219" watchObservedRunningTime="2025-11-25 09:49:39.907163371 +0000 UTC m=+328.492135684" Nov 25 09:49:39 crc kubenswrapper[4769]: I1125 09:49:39.918926 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-6m8kb" podStartSLOduration=1.9391013080000001 podStartE2EDuration="3.918900469s" podCreationTimestamp="2025-11-25 09:49:36 +0000 UTC" firstStartedPulling="2025-11-25 09:49:36.948328545 +0000 UTC m=+325.533300858" lastFinishedPulling="2025-11-25 09:49:38.928127706 +0000 UTC m=+327.513100019" observedRunningTime="2025-11-25 09:49:39.914855412 +0000 UTC m=+328.499827725" watchObservedRunningTime="2025-11-25 09:49:39.918900469 +0000 UTC m=+328.503872782" Nov 25 09:49:39 crc kubenswrapper[4769]: I1125 09:49:39.941281 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/openshift-state-metrics-566fddb674-sjtvr" podStartSLOduration=1.98404736 podStartE2EDuration="3.941258583s" podCreationTimestamp="2025-11-25 09:49:36 +0000 UTC" firstStartedPulling="2025-11-25 09:49:36.976038411 +0000 UTC m=+325.561010724" lastFinishedPulling="2025-11-25 09:49:38.933249634 +0000 UTC m=+327.518221947" observedRunningTime="2025-11-25 09:49:39.938121543 +0000 UTC m=+328.523093866" watchObservedRunningTime="2025-11-25 09:49:39.941258583 +0000 UTC m=+328.526230896" Nov 25 09:49:40 crc kubenswrapper[4769]: I1125 09:49:40.884593 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-c9c8696b8-vrp57"] Nov 25 09:49:40 crc kubenswrapper[4769]: I1125 09:49:40.885920 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-c9c8696b8-vrp57" Nov 25 09:49:40 crc kubenswrapper[4769]: I1125 09:49:40.896152 4769 generic.go:334] "Generic (PLEG): container finished" podID="72737271-19cc-44d8-b3f0-f668c1fe75d9" containerID="3cc091a315baeb14982741baa34696518aed895672140277861748e987559de0" exitCode=0 Nov 25 09:49:40 crc kubenswrapper[4769]: I1125 09:49:40.896379 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"72737271-19cc-44d8-b3f0-f668c1fe75d9","Type":"ContainerDied","Data":"3cc091a315baeb14982741baa34696518aed895672140277861748e987559de0"} Nov 25 09:49:40 crc kubenswrapper[4769]: I1125 09:49:40.908605 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-c9c8696b8-vrp57"] Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.061549 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/67c31f3a-4080-485f-b383-4998935c91c3-console-config\") pod \"console-c9c8696b8-vrp57\" (UID: \"67c31f3a-4080-485f-b383-4998935c91c3\") " pod="openshift-console/console-c9c8696b8-vrp57" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.061637 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/67c31f3a-4080-485f-b383-4998935c91c3-trusted-ca-bundle\") pod \"console-c9c8696b8-vrp57\" (UID: \"67c31f3a-4080-485f-b383-4998935c91c3\") " pod="openshift-console/console-c9c8696b8-vrp57" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.061726 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/67c31f3a-4080-485f-b383-4998935c91c3-oauth-serving-cert\") pod \"console-c9c8696b8-vrp57\" (UID: \"67c31f3a-4080-485f-b383-4998935c91c3\") " pod="openshift-console/console-c9c8696b8-vrp57" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.061777 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/67c31f3a-4080-485f-b383-4998935c91c3-console-serving-cert\") pod \"console-c9c8696b8-vrp57\" (UID: \"67c31f3a-4080-485f-b383-4998935c91c3\") " pod="openshift-console/console-c9c8696b8-vrp57" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.061808 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ccnhr\" (UniqueName: \"kubernetes.io/projected/67c31f3a-4080-485f-b383-4998935c91c3-kube-api-access-ccnhr\") pod \"console-c9c8696b8-vrp57\" (UID: \"67c31f3a-4080-485f-b383-4998935c91c3\") " pod="openshift-console/console-c9c8696b8-vrp57" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.061841 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/67c31f3a-4080-485f-b383-4998935c91c3-console-oauth-config\") pod \"console-c9c8696b8-vrp57\" (UID: \"67c31f3a-4080-485f-b383-4998935c91c3\") " pod="openshift-console/console-c9c8696b8-vrp57" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.062010 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: 
\"kubernetes.io/configmap/67c31f3a-4080-485f-b383-4998935c91c3-service-ca\") pod \"console-c9c8696b8-vrp57\" (UID: \"67c31f3a-4080-485f-b383-4998935c91c3\") " pod="openshift-console/console-c9c8696b8-vrp57" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.163365 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/67c31f3a-4080-485f-b383-4998935c91c3-service-ca\") pod \"console-c9c8696b8-vrp57\" (UID: \"67c31f3a-4080-485f-b383-4998935c91c3\") " pod="openshift-console/console-c9c8696b8-vrp57" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.163455 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/67c31f3a-4080-485f-b383-4998935c91c3-console-config\") pod \"console-c9c8696b8-vrp57\" (UID: \"67c31f3a-4080-485f-b383-4998935c91c3\") " pod="openshift-console/console-c9c8696b8-vrp57" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.163619 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/67c31f3a-4080-485f-b383-4998935c91c3-trusted-ca-bundle\") pod \"console-c9c8696b8-vrp57\" (UID: \"67c31f3a-4080-485f-b383-4998935c91c3\") " pod="openshift-console/console-c9c8696b8-vrp57" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.163695 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/67c31f3a-4080-485f-b383-4998935c91c3-oauth-serving-cert\") pod \"console-c9c8696b8-vrp57\" (UID: \"67c31f3a-4080-485f-b383-4998935c91c3\") " pod="openshift-console/console-c9c8696b8-vrp57" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.164152 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/67c31f3a-4080-485f-b383-4998935c91c3-console-serving-cert\") pod \"console-c9c8696b8-vrp57\" (UID: \"67c31f3a-4080-485f-b383-4998935c91c3\") " pod="openshift-console/console-c9c8696b8-vrp57" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.164289 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ccnhr\" (UniqueName: \"kubernetes.io/projected/67c31f3a-4080-485f-b383-4998935c91c3-kube-api-access-ccnhr\") pod \"console-c9c8696b8-vrp57\" (UID: \"67c31f3a-4080-485f-b383-4998935c91c3\") " pod="openshift-console/console-c9c8696b8-vrp57" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.164319 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/67c31f3a-4080-485f-b383-4998935c91c3-console-oauth-config\") pod \"console-c9c8696b8-vrp57\" (UID: \"67c31f3a-4080-485f-b383-4998935c91c3\") " pod="openshift-console/console-c9c8696b8-vrp57" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.165756 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/67c31f3a-4080-485f-b383-4998935c91c3-service-ca\") pod \"console-c9c8696b8-vrp57\" (UID: \"67c31f3a-4080-485f-b383-4998935c91c3\") " pod="openshift-console/console-c9c8696b8-vrp57" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.165868 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: 
\"kubernetes.io/configmap/67c31f3a-4080-485f-b383-4998935c91c3-console-config\") pod \"console-c9c8696b8-vrp57\" (UID: \"67c31f3a-4080-485f-b383-4998935c91c3\") " pod="openshift-console/console-c9c8696b8-vrp57" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.166137 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/67c31f3a-4080-485f-b383-4998935c91c3-oauth-serving-cert\") pod \"console-c9c8696b8-vrp57\" (UID: \"67c31f3a-4080-485f-b383-4998935c91c3\") " pod="openshift-console/console-c9c8696b8-vrp57" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.166653 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/67c31f3a-4080-485f-b383-4998935c91c3-trusted-ca-bundle\") pod \"console-c9c8696b8-vrp57\" (UID: \"67c31f3a-4080-485f-b383-4998935c91c3\") " pod="openshift-console/console-c9c8696b8-vrp57" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.171174 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/67c31f3a-4080-485f-b383-4998935c91c3-console-oauth-config\") pod \"console-c9c8696b8-vrp57\" (UID: \"67c31f3a-4080-485f-b383-4998935c91c3\") " pod="openshift-console/console-c9c8696b8-vrp57" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.184042 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ccnhr\" (UniqueName: \"kubernetes.io/projected/67c31f3a-4080-485f-b383-4998935c91c3-kube-api-access-ccnhr\") pod \"console-c9c8696b8-vrp57\" (UID: \"67c31f3a-4080-485f-b383-4998935c91c3\") " pod="openshift-console/console-c9c8696b8-vrp57" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.186288 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/67c31f3a-4080-485f-b383-4998935c91c3-console-serving-cert\") pod \"console-c9c8696b8-vrp57\" (UID: \"67c31f3a-4080-485f-b383-4998935c91c3\") " pod="openshift-console/console-c9c8696b8-vrp57" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.213396 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-c9c8696b8-vrp57" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.406453 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/metrics-server-5fddf85589-p5m9n"] Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.408806 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/metrics-server-5fddf85589-p5m9n" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.413720 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"kubelet-serving-ca-bundle" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.413938 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-server-dockercfg-hvtwr" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.414545 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-client-certs" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.414732 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"metrics-server-audit-profiles" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.414885 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-server-tls" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.415132 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-server-8l6e4i4iqssqo" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.418942 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/metrics-server-5fddf85589-p5m9n"] Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.571144 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-server-audit-profiles\" (UniqueName: \"kubernetes.io/configmap/6a1a9e0d-87a7-4b51-85e1-7d886adb3e76-metrics-server-audit-profiles\") pod \"metrics-server-5fddf85589-p5m9n\" (UID: \"6a1a9e0d-87a7-4b51-85e1-7d886adb3e76\") " pod="openshift-monitoring/metrics-server-5fddf85589-p5m9n" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.571200 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-metrics-server-tls\" (UniqueName: \"kubernetes.io/secret/6a1a9e0d-87a7-4b51-85e1-7d886adb3e76-secret-metrics-server-tls\") pod \"metrics-server-5fddf85589-p5m9n\" (UID: \"6a1a9e0d-87a7-4b51-85e1-7d886adb3e76\") " pod="openshift-monitoring/metrics-server-5fddf85589-p5m9n" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.571303 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-log\" (UniqueName: \"kubernetes.io/empty-dir/6a1a9e0d-87a7-4b51-85e1-7d886adb3e76-audit-log\") pod \"metrics-server-5fddf85589-p5m9n\" (UID: \"6a1a9e0d-87a7-4b51-85e1-7d886adb3e76\") " pod="openshift-monitoring/metrics-server-5fddf85589-p5m9n" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.571349 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wfnmv\" (UniqueName: \"kubernetes.io/projected/6a1a9e0d-87a7-4b51-85e1-7d886adb3e76-kube-api-access-wfnmv\") pod \"metrics-server-5fddf85589-p5m9n\" (UID: \"6a1a9e0d-87a7-4b51-85e1-7d886adb3e76\") " pod="openshift-monitoring/metrics-server-5fddf85589-p5m9n" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.571562 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/6a1a9e0d-87a7-4b51-85e1-7d886adb3e76-secret-metrics-client-certs\") pod \"metrics-server-5fddf85589-p5m9n\" (UID: \"6a1a9e0d-87a7-4b51-85e1-7d886adb3e76\") " pod="openshift-monitoring/metrics-server-5fddf85589-p5m9n" 
Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.571744 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6a1a9e0d-87a7-4b51-85e1-7d886adb3e76-configmap-kubelet-serving-ca-bundle\") pod \"metrics-server-5fddf85589-p5m9n\" (UID: \"6a1a9e0d-87a7-4b51-85e1-7d886adb3e76\") " pod="openshift-monitoring/metrics-server-5fddf85589-p5m9n" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.571843 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a1a9e0d-87a7-4b51-85e1-7d886adb3e76-client-ca-bundle\") pod \"metrics-server-5fddf85589-p5m9n\" (UID: \"6a1a9e0d-87a7-4b51-85e1-7d886adb3e76\") " pod="openshift-monitoring/metrics-server-5fddf85589-p5m9n" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.673661 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/6a1a9e0d-87a7-4b51-85e1-7d886adb3e76-secret-metrics-client-certs\") pod \"metrics-server-5fddf85589-p5m9n\" (UID: \"6a1a9e0d-87a7-4b51-85e1-7d886adb3e76\") " pod="openshift-monitoring/metrics-server-5fddf85589-p5m9n" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.673743 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6a1a9e0d-87a7-4b51-85e1-7d886adb3e76-configmap-kubelet-serving-ca-bundle\") pod \"metrics-server-5fddf85589-p5m9n\" (UID: \"6a1a9e0d-87a7-4b51-85e1-7d886adb3e76\") " pod="openshift-monitoring/metrics-server-5fddf85589-p5m9n" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.673774 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a1a9e0d-87a7-4b51-85e1-7d886adb3e76-client-ca-bundle\") pod \"metrics-server-5fddf85589-p5m9n\" (UID: \"6a1a9e0d-87a7-4b51-85e1-7d886adb3e76\") " pod="openshift-monitoring/metrics-server-5fddf85589-p5m9n" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.673794 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-server-audit-profiles\" (UniqueName: \"kubernetes.io/configmap/6a1a9e0d-87a7-4b51-85e1-7d886adb3e76-metrics-server-audit-profiles\") pod \"metrics-server-5fddf85589-p5m9n\" (UID: \"6a1a9e0d-87a7-4b51-85e1-7d886adb3e76\") " pod="openshift-monitoring/metrics-server-5fddf85589-p5m9n" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.673812 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-metrics-server-tls\" (UniqueName: \"kubernetes.io/secret/6a1a9e0d-87a7-4b51-85e1-7d886adb3e76-secret-metrics-server-tls\") pod \"metrics-server-5fddf85589-p5m9n\" (UID: \"6a1a9e0d-87a7-4b51-85e1-7d886adb3e76\") " pod="openshift-monitoring/metrics-server-5fddf85589-p5m9n" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.673849 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-log\" (UniqueName: \"kubernetes.io/empty-dir/6a1a9e0d-87a7-4b51-85e1-7d886adb3e76-audit-log\") pod \"metrics-server-5fddf85589-p5m9n\" (UID: \"6a1a9e0d-87a7-4b51-85e1-7d886adb3e76\") " pod="openshift-monitoring/metrics-server-5fddf85589-p5m9n" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.673871 4769 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-wfnmv\" (UniqueName: \"kubernetes.io/projected/6a1a9e0d-87a7-4b51-85e1-7d886adb3e76-kube-api-access-wfnmv\") pod \"metrics-server-5fddf85589-p5m9n\" (UID: \"6a1a9e0d-87a7-4b51-85e1-7d886adb3e76\") " pod="openshift-monitoring/metrics-server-5fddf85589-p5m9n" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.677188 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6a1a9e0d-87a7-4b51-85e1-7d886adb3e76-configmap-kubelet-serving-ca-bundle\") pod \"metrics-server-5fddf85589-p5m9n\" (UID: \"6a1a9e0d-87a7-4b51-85e1-7d886adb3e76\") " pod="openshift-monitoring/metrics-server-5fddf85589-p5m9n" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.677285 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-server-audit-profiles\" (UniqueName: \"kubernetes.io/configmap/6a1a9e0d-87a7-4b51-85e1-7d886adb3e76-metrics-server-audit-profiles\") pod \"metrics-server-5fddf85589-p5m9n\" (UID: \"6a1a9e0d-87a7-4b51-85e1-7d886adb3e76\") " pod="openshift-monitoring/metrics-server-5fddf85589-p5m9n" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.677370 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-log\" (UniqueName: \"kubernetes.io/empty-dir/6a1a9e0d-87a7-4b51-85e1-7d886adb3e76-audit-log\") pod \"metrics-server-5fddf85589-p5m9n\" (UID: \"6a1a9e0d-87a7-4b51-85e1-7d886adb3e76\") " pod="openshift-monitoring/metrics-server-5fddf85589-p5m9n" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.680394 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-metrics-server-tls\" (UniqueName: \"kubernetes.io/secret/6a1a9e0d-87a7-4b51-85e1-7d886adb3e76-secret-metrics-server-tls\") pod \"metrics-server-5fddf85589-p5m9n\" (UID: \"6a1a9e0d-87a7-4b51-85e1-7d886adb3e76\") " pod="openshift-monitoring/metrics-server-5fddf85589-p5m9n" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.680769 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a1a9e0d-87a7-4b51-85e1-7d886adb3e76-client-ca-bundle\") pod \"metrics-server-5fddf85589-p5m9n\" (UID: \"6a1a9e0d-87a7-4b51-85e1-7d886adb3e76\") " pod="openshift-monitoring/metrics-server-5fddf85589-p5m9n" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.691476 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/6a1a9e0d-87a7-4b51-85e1-7d886adb3e76-secret-metrics-client-certs\") pod \"metrics-server-5fddf85589-p5m9n\" (UID: \"6a1a9e0d-87a7-4b51-85e1-7d886adb3e76\") " pod="openshift-monitoring/metrics-server-5fddf85589-p5m9n" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.695066 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wfnmv\" (UniqueName: \"kubernetes.io/projected/6a1a9e0d-87a7-4b51-85e1-7d886adb3e76-kube-api-access-wfnmv\") pod \"metrics-server-5fddf85589-p5m9n\" (UID: \"6a1a9e0d-87a7-4b51-85e1-7d886adb3e76\") " pod="openshift-monitoring/metrics-server-5fddf85589-p5m9n" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.787920 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/metrics-server-5fddf85589-p5m9n" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.887733 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/monitoring-plugin-5bcc85f5f9-99d4v"] Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.910592 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/monitoring-plugin-5bcc85f5f9-99d4v"] Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.911120 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/monitoring-plugin-5bcc85f5f9-99d4v" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.917028 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"default-dockercfg-6tstp" Nov 25 09:49:41 crc kubenswrapper[4769]: I1125 09:49:41.918709 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"monitoring-plugin-cert" Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.082054 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"monitoring-plugin-cert\" (UniqueName: \"kubernetes.io/secret/3427c858-9645-4274-9933-d65bba8325da-monitoring-plugin-cert\") pod \"monitoring-plugin-5bcc85f5f9-99d4v\" (UID: \"3427c858-9645-4274-9933-d65bba8325da\") " pod="openshift-monitoring/monitoring-plugin-5bcc85f5f9-99d4v" Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.097076 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-c9c8696b8-vrp57"] Nov 25 09:49:42 crc kubenswrapper[4769]: W1125 09:49:42.124558 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod67c31f3a_4080_485f_b383_4998935c91c3.slice/crio-d196ab059a2013e7a7f3bb2f6d46d7ac86308269b28e86c6b8dbaa69e4604b23 WatchSource:0}: Error finding container d196ab059a2013e7a7f3bb2f6d46d7ac86308269b28e86c6b8dbaa69e4604b23: Status 404 returned error can't find the container with id d196ab059a2013e7a7f3bb2f6d46d7ac86308269b28e86c6b8dbaa69e4604b23 Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.183469 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"monitoring-plugin-cert\" (UniqueName: \"kubernetes.io/secret/3427c858-9645-4274-9933-d65bba8325da-monitoring-plugin-cert\") pod \"monitoring-plugin-5bcc85f5f9-99d4v\" (UID: \"3427c858-9645-4274-9933-d65bba8325da\") " pod="openshift-monitoring/monitoring-plugin-5bcc85f5f9-99d4v" Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.191255 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"monitoring-plugin-cert\" (UniqueName: \"kubernetes.io/secret/3427c858-9645-4274-9933-d65bba8325da-monitoring-plugin-cert\") pod \"monitoring-plugin-5bcc85f5f9-99d4v\" (UID: \"3427c858-9645-4274-9933-d65bba8325da\") " pod="openshift-monitoring/monitoring-plugin-5bcc85f5f9-99d4v" Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.252271 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/monitoring-plugin-5bcc85f5f9-99d4v" Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.407422 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/metrics-server-5fddf85589-p5m9n"] Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.530277 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/monitoring-plugin-5bcc85f5f9-99d4v"] Nov 25 09:49:42 crc kubenswrapper[4769]: W1125 09:49:42.536230 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3427c858_9645_4274_9933_d65bba8325da.slice/crio-f3991c6703d50ee80782edfe48b6ae85f31b45eed0b335f520c3e349835d53d7 WatchSource:0}: Error finding container f3991c6703d50ee80782edfe48b6ae85f31b45eed0b335f520c3e349835d53d7: Status 404 returned error can't find the container with id f3991c6703d50ee80782edfe48b6ae85f31b45eed0b335f520c3e349835d53d7 Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.559099 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/prometheus-k8s-0"] Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.561839 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.565279 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s" Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.567587 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-tls" Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.570400 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-grpc-tls-5hacg5kv5cqdb" Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.570488 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-web-config" Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.570926 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-kube-rbac-proxy-web" Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.571002 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-dockercfg-qhw9n" Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.571198 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-rbac-proxy" Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.571240 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"serving-certs-ca-bundle" Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.571281 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-thanos-prometheus-http-client-file" Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.571405 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-tls-assets-0" Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.572101 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-thanos-sidecar-tls" Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.572306 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"prometheus-k8s-rulefiles-0" Nov 25 09:49:42 
Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.618337 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"prometheus-trusted-ca-bundle"
Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.627359 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-k8s-0"]
Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.719269 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-configmap-kubelet-serving-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0"
Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.719355 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mwgbq\" (UniqueName: \"kubernetes.io/projected/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-kube-api-access-mwgbq\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0"
Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.719398 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-config-out\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0"
Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.719425 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-k8s-db\" (UniqueName: \"kubernetes.io/empty-dir/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-prometheus-k8s-db\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0"
Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.719449 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-prometheus-k8s-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-secret-prometheus-k8s-kube-rbac-proxy-web\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0"
Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.719497 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-serving-certs-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-configmap-serving-certs-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0"
Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.719524 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-k8s-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-prometheus-k8s-rulefiles-0\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0"
Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.719547 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-configmap-metrics-client-ca\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0"
Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.719573 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-secret-grpc-tls\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0"
Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.719601 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-thanos-prometheus-http-client-file\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0"
Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.719627 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-prometheus-trusted-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0"
Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.719654 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-web-config\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0"
Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.719687 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-prometheus-k8s-tls\" (UniqueName: \"kubernetes.io/secret/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-secret-prometheus-k8s-tls\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0"
Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.719714 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-prometheus-k8s-thanos-sidecar-tls\" (UniqueName: \"kubernetes.io/secret/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-secret-prometheus-k8s-thanos-sidecar-tls\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0"
Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.719735 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-secret-kube-rbac-proxy\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0"
Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.719756 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-secret-metrics-client-certs\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0"
Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.719778 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-tls-assets\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0"
Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.719800 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-config\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0"
Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.821881 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-secret-metrics-client-certs\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0"
Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.821975 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-tls-assets\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0"
Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.822034 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-config\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0"
Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.822065 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-configmap-kubelet-serving-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0"
Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.822106 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mwgbq\" (UniqueName: \"kubernetes.io/projected/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-kube-api-access-mwgbq\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0"
Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.822142 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-config-out\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0"
Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.822168 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-k8s-db\" (UniqueName: \"kubernetes.io/empty-dir/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-prometheus-k8s-db\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0"
Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.822190 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-prometheus-k8s-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-secret-prometheus-k8s-kube-rbac-proxy-web\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0"
Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.822243 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-serving-certs-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-configmap-serving-certs-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0"
Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.822276 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-k8s-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-prometheus-k8s-rulefiles-0\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0"
Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.822302 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-configmap-metrics-client-ca\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0"
Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.822335 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-secret-grpc-tls\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0"
Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.822366 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-thanos-prometheus-http-client-file\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0"
Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.822398 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-prometheus-trusted-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0"
Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.822426 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-web-config\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0"
Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.822450 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-prometheus-k8s-tls\" (UniqueName: \"kubernetes.io/secret/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-secret-prometheus-k8s-tls\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0"
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-prometheus-k8s-thanos-sidecar-tls\" (UniqueName: \"kubernetes.io/secret/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-secret-prometheus-k8s-thanos-sidecar-tls\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.822554 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-secret-kube-rbac-proxy\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.826532 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-configmap-kubelet-serving-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.827052 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-serving-certs-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-configmap-serving-certs-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.827210 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-configmap-metrics-client-ca\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.830002 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-web-config\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.830058 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-secret-kube-rbac-proxy\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.830390 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-thanos-prometheus-http-client-file\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.831438 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-prometheus-k8s-thanos-sidecar-tls\" (UniqueName: \"kubernetes.io/secret/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-secret-prometheus-k8s-thanos-sidecar-tls\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:49:42 crc 
kubenswrapper[4769]: I1125 09:49:42.832895 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-prometheus-trusted-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.834157 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-tls-assets\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.834755 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-k8s-db\" (UniqueName: \"kubernetes.io/empty-dir/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-prometheus-k8s-db\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.834774 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-prometheus-k8s-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-secret-prometheus-k8s-kube-rbac-proxy-web\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.836790 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-config-out\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.837683 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-k8s-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-prometheus-k8s-rulefiles-0\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.842774 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-prometheus-k8s-tls\" (UniqueName: \"kubernetes.io/secret/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-secret-prometheus-k8s-tls\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.842774 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-secret-grpc-tls\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.845875 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-secret-metrics-client-certs\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.849728 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"config\" (UniqueName: \"kubernetes.io/secret/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-config\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.852646 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mwgbq\" (UniqueName: \"kubernetes.io/projected/b0883e98-eb9d-4f84-ab14-eacd1b200c0d-kube-api-access-mwgbq\") pod \"prometheus-k8s-0\" (UID: \"b0883e98-eb9d-4f84-ab14-eacd1b200c0d\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.893890 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.923093 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-c9c8696b8-vrp57" event={"ID":"67c31f3a-4080-485f-b383-4998935c91c3","Type":"ContainerStarted","Data":"85a72e051fea5120e6da6df173a280739d5379f25a4af76ca27ebea7195a6b8d"} Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.923154 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-c9c8696b8-vrp57" event={"ID":"67c31f3a-4080-485f-b383-4998935c91c3","Type":"ContainerStarted","Data":"d196ab059a2013e7a7f3bb2f6d46d7ac86308269b28e86c6b8dbaa69e4604b23"} Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.937527 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-d49b95667-frlcl" event={"ID":"ba054406-d227-497a-ab7f-fd4e85955cbf","Type":"ContainerStarted","Data":"12afec45179f4d35b72d24246d654b2530434521aea1bcd5dca4c5b7e3ebb863"} Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.937598 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-d49b95667-frlcl" event={"ID":"ba054406-d227-497a-ab7f-fd4e85955cbf","Type":"ContainerStarted","Data":"a6c8f4443fa74e4d439ea108f271410ebcbff93a8f904e9e23f4296120fab9c5"} Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.937608 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-d49b95667-frlcl" event={"ID":"ba054406-d227-497a-ab7f-fd4e85955cbf","Type":"ContainerStarted","Data":"a411db27f7d9c13f20e953eeb7198d7d928c359da4b6e550654bff1a44d64836"} Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.941344 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/monitoring-plugin-5bcc85f5f9-99d4v" event={"ID":"3427c858-9645-4274-9933-d65bba8325da","Type":"ContainerStarted","Data":"f3991c6703d50ee80782edfe48b6ae85f31b45eed0b335f520c3e349835d53d7"} Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.952294 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-c9c8696b8-vrp57" podStartSLOduration=2.9522677489999998 podStartE2EDuration="2.952267749s" podCreationTimestamp="2025-11-25 09:49:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:49:42.942554599 +0000 UTC m=+331.527526912" watchObservedRunningTime="2025-11-25 09:49:42.952267749 +0000 UTC m=+331.537240062" Nov 25 09:49:42 crc kubenswrapper[4769]: I1125 09:49:42.955397 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/metrics-server-5fddf85589-p5m9n" 
event={"ID":"6a1a9e0d-87a7-4b51-85e1-7d886adb3e76","Type":"ContainerStarted","Data":"38b07206d40b0e0665ba8813a477cff3d188dc35213eab9fbb25f8c7a255dec1"} Nov 25 09:49:43 crc kubenswrapper[4769]: I1125 09:49:43.159999 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-k8s-0"] Nov 25 09:49:43 crc kubenswrapper[4769]: W1125 09:49:43.171248 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb0883e98_eb9d_4f84_ab14_eacd1b200c0d.slice/crio-9d52fb67c65c6a48b39e845328a5959c8ba6c5064aff7ed27f359d3e4c6d36a0 WatchSource:0}: Error finding container 9d52fb67c65c6a48b39e845328a5959c8ba6c5064aff7ed27f359d3e4c6d36a0: Status 404 returned error can't find the container with id 9d52fb67c65c6a48b39e845328a5959c8ba6c5064aff7ed27f359d3e4c6d36a0 Nov 25 09:49:43 crc kubenswrapper[4769]: I1125 09:49:43.965078 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"b0883e98-eb9d-4f84-ab14-eacd1b200c0d","Type":"ContainerStarted","Data":"9d52fb67c65c6a48b39e845328a5959c8ba6c5064aff7ed27f359d3e4c6d36a0"} Nov 25 09:49:45 crc kubenswrapper[4769]: I1125 09:49:45.857495 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-kr7q7" Nov 25 09:49:45 crc kubenswrapper[4769]: I1125 09:49:45.930507 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-xrd6w"] Nov 25 09:49:46 crc kubenswrapper[4769]: I1125 09:49:46.002522 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"72737271-19cc-44d8-b3f0-f668c1fe75d9","Type":"ContainerStarted","Data":"cdd21e1a47e1b235f3c31d6151bad4acd2193a18667b45bf9a28e19d8946e575"} Nov 25 09:49:46 crc kubenswrapper[4769]: I1125 09:49:46.002587 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"72737271-19cc-44d8-b3f0-f668c1fe75d9","Type":"ContainerStarted","Data":"837152db72a091cfba8cd558e60ac8aa87037616a68f4397a46351b676f6d34a"} Nov 25 09:49:46 crc kubenswrapper[4769]: I1125 09:49:46.002602 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"72737271-19cc-44d8-b3f0-f668c1fe75d9","Type":"ContainerStarted","Data":"a9420ae9e27272e13f7e102aadc465828c78ef5db3252f11179af1a9f5742eab"} Nov 25 09:49:46 crc kubenswrapper[4769]: I1125 09:49:46.002611 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"72737271-19cc-44d8-b3f0-f668c1fe75d9","Type":"ContainerStarted","Data":"0bc7f4ccf0d8d8c94b14fadbae5ff76ff5073762ccb034f38e4b5bdc9d76d085"} Nov 25 09:49:46 crc kubenswrapper[4769]: I1125 09:49:46.006952 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-d49b95667-frlcl" event={"ID":"ba054406-d227-497a-ab7f-fd4e85955cbf","Type":"ContainerStarted","Data":"f16d9cb80202806ac4b850c38374965be05766185b788d0929b61d1a6e163299"} Nov 25 09:49:46 crc kubenswrapper[4769]: I1125 09:49:46.007013 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-d49b95667-frlcl" event={"ID":"ba054406-d227-497a-ab7f-fd4e85955cbf","Type":"ContainerStarted","Data":"d62e742aa19d73f5969826d7349c7b2d48043fc8e457d3be853576358f0500ef"} Nov 25 09:49:46 crc kubenswrapper[4769]: I1125 09:49:46.007027 4769 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-d49b95667-frlcl" event={"ID":"ba054406-d227-497a-ab7f-fd4e85955cbf","Type":"ContainerStarted","Data":"e460274406a2d2e2f073a2e62d4b888f01642a37da436a915de048c1338cdb63"} Nov 25 09:49:46 crc kubenswrapper[4769]: I1125 09:49:46.007149 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/thanos-querier-d49b95667-frlcl" Nov 25 09:49:46 crc kubenswrapper[4769]: I1125 09:49:46.008549 4769 generic.go:334] "Generic (PLEG): container finished" podID="b0883e98-eb9d-4f84-ab14-eacd1b200c0d" containerID="cbc2415511429ca7d36f3a42bfd6d9e5ccedba27deecf9de18954de845269c06" exitCode=0 Nov 25 09:49:46 crc kubenswrapper[4769]: I1125 09:49:46.008621 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"b0883e98-eb9d-4f84-ab14-eacd1b200c0d","Type":"ContainerDied","Data":"cbc2415511429ca7d36f3a42bfd6d9e5ccedba27deecf9de18954de845269c06"} Nov 25 09:49:46 crc kubenswrapper[4769]: I1125 09:49:46.011385 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/monitoring-plugin-5bcc85f5f9-99d4v" event={"ID":"3427c858-9645-4274-9933-d65bba8325da","Type":"ContainerStarted","Data":"62a0c0e8b82dbe41a17697ae78a198ae852dc44592fbed981a4dcf7e3465f5fa"} Nov 25 09:49:46 crc kubenswrapper[4769]: I1125 09:49:46.011993 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/monitoring-plugin-5bcc85f5f9-99d4v" Nov 25 09:49:46 crc kubenswrapper[4769]: I1125 09:49:46.017202 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/metrics-server-5fddf85589-p5m9n" event={"ID":"6a1a9e0d-87a7-4b51-85e1-7d886adb3e76","Type":"ContainerStarted","Data":"58761b0c1eee8c5875114aa9adb5cd4896a6aeae1d95a7dd77ad505d5970508b"} Nov 25 09:49:46 crc kubenswrapper[4769]: I1125 09:49:46.022345 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/monitoring-plugin-5bcc85f5f9-99d4v" Nov 25 09:49:46 crc kubenswrapper[4769]: I1125 09:49:46.047686 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/thanos-querier-d49b95667-frlcl" podStartSLOduration=2.18073804 podStartE2EDuration="8.047654766s" podCreationTimestamp="2025-11-25 09:49:38 +0000 UTC" firstStartedPulling="2025-11-25 09:49:39.283076227 +0000 UTC m=+327.868048540" lastFinishedPulling="2025-11-25 09:49:45.149992953 +0000 UTC m=+333.734965266" observedRunningTime="2025-11-25 09:49:46.035588248 +0000 UTC m=+334.620560571" watchObservedRunningTime="2025-11-25 09:49:46.047654766 +0000 UTC m=+334.632627079" Nov 25 09:49:46 crc kubenswrapper[4769]: I1125 09:49:46.099198 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/monitoring-plugin-5bcc85f5f9-99d4v" podStartSLOduration=2.488101271 podStartE2EDuration="5.099174009s" podCreationTimestamp="2025-11-25 09:49:41 +0000 UTC" firstStartedPulling="2025-11-25 09:49:42.539598134 +0000 UTC m=+331.124570447" lastFinishedPulling="2025-11-25 09:49:45.150670872 +0000 UTC m=+333.735643185" observedRunningTime="2025-11-25 09:49:46.096108661 +0000 UTC m=+334.681080974" watchObservedRunningTime="2025-11-25 09:49:46.099174009 +0000 UTC m=+334.684146322" Nov 25 09:49:46 crc kubenswrapper[4769]: I1125 09:49:46.136902 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/metrics-server-5fddf85589-p5m9n" 
podStartSLOduration=2.415546451 podStartE2EDuration="5.136879845s" podCreationTimestamp="2025-11-25 09:49:41 +0000 UTC" firstStartedPulling="2025-11-25 09:49:42.431704007 +0000 UTC m=+331.016676320" lastFinishedPulling="2025-11-25 09:49:45.153037391 +0000 UTC m=+333.738009714" observedRunningTime="2025-11-25 09:49:46.12315511 +0000 UTC m=+334.708127433" watchObservedRunningTime="2025-11-25 09:49:46.136879845 +0000 UTC m=+334.721852158" Nov 25 09:49:47 crc kubenswrapper[4769]: I1125 09:49:47.028732 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"72737271-19cc-44d8-b3f0-f668c1fe75d9","Type":"ContainerStarted","Data":"b15c44922fbe0e129508083e42516a80ed875ef2eb73e30de8b5f57cfa7273da"} Nov 25 09:49:47 crc kubenswrapper[4769]: I1125 09:49:47.029452 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"72737271-19cc-44d8-b3f0-f668c1fe75d9","Type":"ContainerStarted","Data":"48a79a5cb1234f688d034b7f4e1b0bcdf9a25f70fb4869b42bcd6251f920b157"} Nov 25 09:49:47 crc kubenswrapper[4769]: I1125 09:49:47.039755 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/thanos-querier-d49b95667-frlcl" Nov 25 09:49:47 crc kubenswrapper[4769]: I1125 09:49:47.108427 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/alertmanager-main-0" podStartSLOduration=3.462509624 podStartE2EDuration="10.108402265s" podCreationTimestamp="2025-11-25 09:49:37 +0000 UTC" firstStartedPulling="2025-11-25 09:49:38.493431135 +0000 UTC m=+327.078403448" lastFinishedPulling="2025-11-25 09:49:45.139323776 +0000 UTC m=+333.724296089" observedRunningTime="2025-11-25 09:49:47.063155422 +0000 UTC m=+335.648127755" watchObservedRunningTime="2025-11-25 09:49:47.108402265 +0000 UTC m=+335.693374568" Nov 25 09:49:50 crc kubenswrapper[4769]: I1125 09:49:50.062587 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"b0883e98-eb9d-4f84-ab14-eacd1b200c0d","Type":"ContainerStarted","Data":"d58fa68b0372873893819f336fb78153b9e5493794e32c67a0344c15688cfd62"} Nov 25 09:49:50 crc kubenswrapper[4769]: I1125 09:49:50.063604 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"b0883e98-eb9d-4f84-ab14-eacd1b200c0d","Type":"ContainerStarted","Data":"1b71a626da6e9c57a28eed11428f46245aeea85f0696c886bf1568094224d245"} Nov 25 09:49:50 crc kubenswrapper[4769]: I1125 09:49:50.063624 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"b0883e98-eb9d-4f84-ab14-eacd1b200c0d","Type":"ContainerStarted","Data":"5e8d6dde34c2898ddd88e01315b8554351bf05c62c90ea2bd95bc6fe43d6a8dd"} Nov 25 09:49:50 crc kubenswrapper[4769]: I1125 09:49:50.063637 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"b0883e98-eb9d-4f84-ab14-eacd1b200c0d","Type":"ContainerStarted","Data":"7349c652225a3ee99730e200d23ed0ee8059bfc276b2eb3fb4351ab634976fbf"} Nov 25 09:49:51 crc kubenswrapper[4769]: I1125 09:49:51.075028 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"b0883e98-eb9d-4f84-ab14-eacd1b200c0d","Type":"ContainerStarted","Data":"a5cc4e308add32ff87b900f557edeb6a0c4a6719c746c4d060f61655c53ef502"} Nov 25 09:49:51 crc kubenswrapper[4769]: I1125 09:49:51.075086 4769 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"b0883e98-eb9d-4f84-ab14-eacd1b200c0d","Type":"ContainerStarted","Data":"15619bc207204cfe3266628e02b7be50a2d2562078d9438e0c594e5da72f2937"} Nov 25 09:49:51 crc kubenswrapper[4769]: I1125 09:49:51.214697 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-c9c8696b8-vrp57" Nov 25 09:49:51 crc kubenswrapper[4769]: I1125 09:49:51.214776 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-c9c8696b8-vrp57" Nov 25 09:49:51 crc kubenswrapper[4769]: I1125 09:49:51.221513 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-c9c8696b8-vrp57" Nov 25 09:49:51 crc kubenswrapper[4769]: I1125 09:49:51.243051 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/prometheus-k8s-0" podStartSLOduration=5.832542592 podStartE2EDuration="9.243021582s" podCreationTimestamp="2025-11-25 09:49:42 +0000 UTC" firstStartedPulling="2025-11-25 09:49:46.010883557 +0000 UTC m=+334.595855870" lastFinishedPulling="2025-11-25 09:49:49.421362547 +0000 UTC m=+338.006334860" observedRunningTime="2025-11-25 09:49:51.108170528 +0000 UTC m=+339.693142841" watchObservedRunningTime="2025-11-25 09:49:51.243021582 +0000 UTC m=+339.827993895" Nov 25 09:49:52 crc kubenswrapper[4769]: I1125 09:49:52.085998 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-c9c8696b8-vrp57" Nov 25 09:49:52 crc kubenswrapper[4769]: I1125 09:49:52.152502 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-ghgwj"] Nov 25 09:49:52 crc kubenswrapper[4769]: I1125 09:49:52.894380 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:50:01 crc kubenswrapper[4769]: I1125 09:50:01.788672 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-monitoring/metrics-server-5fddf85589-p5m9n" Nov 25 09:50:01 crc kubenswrapper[4769]: I1125 09:50:01.789491 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/metrics-server-5fddf85589-p5m9n" Nov 25 09:50:10 crc kubenswrapper[4769]: I1125 09:50:10.982419 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" podUID="fb4b7e91-71e7-4719-b0c5-35d132cf6115" containerName="registry" containerID="cri-o://e3629fe431968ff730a9b207287d490fd395ba6df8690abd47dc55dbb279520a" gracePeriod=30 Nov 25 09:50:11 crc kubenswrapper[4769]: I1125 09:50:11.216869 4769 generic.go:334] "Generic (PLEG): container finished" podID="fb4b7e91-71e7-4719-b0c5-35d132cf6115" containerID="e3629fe431968ff730a9b207287d490fd395ba6df8690abd47dc55dbb279520a" exitCode=0 Nov 25 09:50:11 crc kubenswrapper[4769]: I1125 09:50:11.216937 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" event={"ID":"fb4b7e91-71e7-4719-b0c5-35d132cf6115","Type":"ContainerDied","Data":"e3629fe431968ff730a9b207287d490fd395ba6df8690abd47dc55dbb279520a"} Nov 25 09:50:11 crc kubenswrapper[4769]: I1125 09:50:11.330898 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:50:11 crc kubenswrapper[4769]: I1125 09:50:11.457763 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fb4b7e91-71e7-4719-b0c5-35d132cf6115-bound-sa-token\") pod \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " Nov 25 09:50:11 crc kubenswrapper[4769]: I1125 09:50:11.457856 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/fb4b7e91-71e7-4719-b0c5-35d132cf6115-ca-trust-extracted\") pod \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " Nov 25 09:50:11 crc kubenswrapper[4769]: I1125 09:50:11.457949 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/fb4b7e91-71e7-4719-b0c5-35d132cf6115-registry-tls\") pod \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " Nov 25 09:50:11 crc kubenswrapper[4769]: I1125 09:50:11.458015 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fb4b7e91-71e7-4719-b0c5-35d132cf6115-trusted-ca\") pod \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " Nov 25 09:50:11 crc kubenswrapper[4769]: I1125 09:50:11.458280 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " Nov 25 09:50:11 crc kubenswrapper[4769]: I1125 09:50:11.458376 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hljhv\" (UniqueName: \"kubernetes.io/projected/fb4b7e91-71e7-4719-b0c5-35d132cf6115-kube-api-access-hljhv\") pod \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " Nov 25 09:50:11 crc kubenswrapper[4769]: I1125 09:50:11.458436 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/fb4b7e91-71e7-4719-b0c5-35d132cf6115-installation-pull-secrets\") pod \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " Nov 25 09:50:11 crc kubenswrapper[4769]: I1125 09:50:11.458498 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/fb4b7e91-71e7-4719-b0c5-35d132cf6115-registry-certificates\") pod \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\" (UID: \"fb4b7e91-71e7-4719-b0c5-35d132cf6115\") " Nov 25 09:50:11 crc kubenswrapper[4769]: I1125 09:50:11.459033 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fb4b7e91-71e7-4719-b0c5-35d132cf6115-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "fb4b7e91-71e7-4719-b0c5-35d132cf6115" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:50:11 crc kubenswrapper[4769]: I1125 09:50:11.459420 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fb4b7e91-71e7-4719-b0c5-35d132cf6115-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "fb4b7e91-71e7-4719-b0c5-35d132cf6115" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:50:11 crc kubenswrapper[4769]: I1125 09:50:11.465754 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fb4b7e91-71e7-4719-b0c5-35d132cf6115-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "fb4b7e91-71e7-4719-b0c5-35d132cf6115" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:50:11 crc kubenswrapper[4769]: I1125 09:50:11.465731 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fb4b7e91-71e7-4719-b0c5-35d132cf6115-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "fb4b7e91-71e7-4719-b0c5-35d132cf6115" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:50:11 crc kubenswrapper[4769]: I1125 09:50:11.466521 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fb4b7e91-71e7-4719-b0c5-35d132cf6115-kube-api-access-hljhv" (OuterVolumeSpecName: "kube-api-access-hljhv") pod "fb4b7e91-71e7-4719-b0c5-35d132cf6115" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115"). InnerVolumeSpecName "kube-api-access-hljhv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:50:11 crc kubenswrapper[4769]: I1125 09:50:11.466950 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fb4b7e91-71e7-4719-b0c5-35d132cf6115-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "fb4b7e91-71e7-4719-b0c5-35d132cf6115" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:50:11 crc kubenswrapper[4769]: I1125 09:50:11.473195 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "fb4b7e91-71e7-4719-b0c5-35d132cf6115" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 25 09:50:11 crc kubenswrapper[4769]: I1125 09:50:11.476485 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fb4b7e91-71e7-4719-b0c5-35d132cf6115-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "fb4b7e91-71e7-4719-b0c5-35d132cf6115" (UID: "fb4b7e91-71e7-4719-b0c5-35d132cf6115"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:50:11 crc kubenswrapper[4769]: I1125 09:50:11.560202 4769 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/fb4b7e91-71e7-4719-b0c5-35d132cf6115-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:11 crc kubenswrapper[4769]: I1125 09:50:11.560241 4769 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fb4b7e91-71e7-4719-b0c5-35d132cf6115-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:11 crc kubenswrapper[4769]: I1125 09:50:11.560252 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hljhv\" (UniqueName: \"kubernetes.io/projected/fb4b7e91-71e7-4719-b0c5-35d132cf6115-kube-api-access-hljhv\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:11 crc kubenswrapper[4769]: I1125 09:50:11.560268 4769 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/fb4b7e91-71e7-4719-b0c5-35d132cf6115-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:11 crc kubenswrapper[4769]: I1125 09:50:11.560278 4769 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/fb4b7e91-71e7-4719-b0c5-35d132cf6115-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:11 crc kubenswrapper[4769]: I1125 09:50:11.560287 4769 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fb4b7e91-71e7-4719-b0c5-35d132cf6115-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:11 crc kubenswrapper[4769]: I1125 09:50:11.560296 4769 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/fb4b7e91-71e7-4719-b0c5-35d132cf6115-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:12 crc kubenswrapper[4769]: I1125 09:50:12.230376 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" event={"ID":"fb4b7e91-71e7-4719-b0c5-35d132cf6115","Type":"ContainerDied","Data":"817d608979207d4a2be3456a1413c862b9719c56296bb1aadf07c913375c6e10"} Nov 25 09:50:12 crc kubenswrapper[4769]: I1125 09:50:12.230447 4769 scope.go:117] "RemoveContainer" containerID="e3629fe431968ff730a9b207287d490fd395ba6df8690abd47dc55dbb279520a" Nov 25 09:50:12 crc kubenswrapper[4769]: I1125 09:50:12.230469 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-xrd6w" Nov 25 09:50:12 crc kubenswrapper[4769]: I1125 09:50:12.284495 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-xrd6w"] Nov 25 09:50:12 crc kubenswrapper[4769]: I1125 09:50:12.291185 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-xrd6w"] Nov 25 09:50:14 crc kubenswrapper[4769]: I1125 09:50:14.249771 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fb4b7e91-71e7-4719-b0c5-35d132cf6115" path="/var/lib/kubelet/pods/fb4b7e91-71e7-4719-b0c5-35d132cf6115/volumes" Nov 25 09:50:16 crc kubenswrapper[4769]: E1125 09:50:16.003234 4769 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfb4b7e91_71e7_4719_b0c5_35d132cf6115.slice\": RecentStats: unable to find data in memory cache]" Nov 25 09:50:16 crc kubenswrapper[4769]: E1125 09:50:16.003892 4769 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfb4b7e91_71e7_4719_b0c5_35d132cf6115.slice\": RecentStats: unable to find data in memory cache]" Nov 25 09:50:17 crc kubenswrapper[4769]: I1125 09:50:17.204577 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-ghgwj" podUID="2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4" containerName="console" containerID="cri-o://bd938aa057ee176d2433481c20fe69980cf193c32fd46e400794ff30d0fca185" gracePeriod=15 Nov 25 09:50:17 crc kubenswrapper[4769]: I1125 09:50:17.600478 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-ghgwj_2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4/console/0.log" Nov 25 09:50:17 crc kubenswrapper[4769]: I1125 09:50:17.600930 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-ghgwj" Nov 25 09:50:17 crc kubenswrapper[4769]: I1125 09:50:17.670048 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4-console-serving-cert\") pod \"2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4\" (UID: \"2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4\") " Nov 25 09:50:17 crc kubenswrapper[4769]: I1125 09:50:17.670478 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-568dv\" (UniqueName: \"kubernetes.io/projected/2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4-kube-api-access-568dv\") pod \"2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4\" (UID: \"2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4\") " Nov 25 09:50:17 crc kubenswrapper[4769]: I1125 09:50:17.670525 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4-oauth-serving-cert\") pod \"2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4\" (UID: \"2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4\") " Nov 25 09:50:17 crc kubenswrapper[4769]: I1125 09:50:17.670577 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4-console-oauth-config\") pod \"2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4\" (UID: \"2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4\") " Nov 25 09:50:17 crc kubenswrapper[4769]: I1125 09:50:17.670618 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4-trusted-ca-bundle\") pod \"2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4\" (UID: \"2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4\") " Nov 25 09:50:17 crc kubenswrapper[4769]: I1125 09:50:17.670660 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4-service-ca\") pod \"2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4\" (UID: \"2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4\") " Nov 25 09:50:17 crc kubenswrapper[4769]: I1125 09:50:17.670697 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4-console-config\") pod \"2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4\" (UID: \"2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4\") " Nov 25 09:50:17 crc kubenswrapper[4769]: I1125 09:50:17.671354 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4" (UID: "2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:50:17 crc kubenswrapper[4769]: I1125 09:50:17.671405 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4" (UID: "2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:50:17 crc kubenswrapper[4769]: I1125 09:50:17.671879 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4-console-config" (OuterVolumeSpecName: "console-config") pod "2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4" (UID: "2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:50:17 crc kubenswrapper[4769]: I1125 09:50:17.671936 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4-service-ca" (OuterVolumeSpecName: "service-ca") pod "2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4" (UID: "2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:50:17 crc kubenswrapper[4769]: I1125 09:50:17.676714 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4" (UID: "2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:50:17 crc kubenswrapper[4769]: I1125 09:50:17.677045 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4" (UID: "2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:50:17 crc kubenswrapper[4769]: I1125 09:50:17.677326 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4-kube-api-access-568dv" (OuterVolumeSpecName: "kube-api-access-568dv") pod "2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4" (UID: "2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4"). InnerVolumeSpecName "kube-api-access-568dv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:50:17 crc kubenswrapper[4769]: I1125 09:50:17.772384 4769 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:17 crc kubenswrapper[4769]: I1125 09:50:17.772853 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-568dv\" (UniqueName: \"kubernetes.io/projected/2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4-kube-api-access-568dv\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:17 crc kubenswrapper[4769]: I1125 09:50:17.772871 4769 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:17 crc kubenswrapper[4769]: I1125 09:50:17.772883 4769 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:17 crc kubenswrapper[4769]: I1125 09:50:17.772893 4769 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:17 crc kubenswrapper[4769]: I1125 09:50:17.772905 4769 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:17 crc kubenswrapper[4769]: I1125 09:50:17.772916 4769 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4-console-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:50:18 crc kubenswrapper[4769]: I1125 09:50:18.274737 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-ghgwj_2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4/console/0.log" Nov 25 09:50:18 crc kubenswrapper[4769]: I1125 09:50:18.274791 4769 generic.go:334] "Generic (PLEG): container finished" podID="2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4" containerID="bd938aa057ee176d2433481c20fe69980cf193c32fd46e400794ff30d0fca185" exitCode=2 Nov 25 09:50:18 crc kubenswrapper[4769]: I1125 09:50:18.274827 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-ghgwj" event={"ID":"2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4","Type":"ContainerDied","Data":"bd938aa057ee176d2433481c20fe69980cf193c32fd46e400794ff30d0fca185"} Nov 25 09:50:18 crc kubenswrapper[4769]: I1125 09:50:18.274885 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-ghgwj" event={"ID":"2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4","Type":"ContainerDied","Data":"256dcf706467a83aaeea17bb1c3a4e9874c360be99e24b31988a49e5e71bc3c0"} Nov 25 09:50:18 crc kubenswrapper[4769]: I1125 09:50:18.274904 4769 scope.go:117] "RemoveContainer" containerID="bd938aa057ee176d2433481c20fe69980cf193c32fd46e400794ff30d0fca185" Nov 25 09:50:18 crc kubenswrapper[4769]: I1125 09:50:18.275057 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-ghgwj" Nov 25 09:50:18 crc kubenswrapper[4769]: I1125 09:50:18.303680 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-ghgwj"] Nov 25 09:50:18 crc kubenswrapper[4769]: I1125 09:50:18.307314 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-ghgwj"] Nov 25 09:50:18 crc kubenswrapper[4769]: I1125 09:50:18.308108 4769 scope.go:117] "RemoveContainer" containerID="bd938aa057ee176d2433481c20fe69980cf193c32fd46e400794ff30d0fca185" Nov 25 09:50:18 crc kubenswrapper[4769]: E1125 09:50:18.308635 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bd938aa057ee176d2433481c20fe69980cf193c32fd46e400794ff30d0fca185\": container with ID starting with bd938aa057ee176d2433481c20fe69980cf193c32fd46e400794ff30d0fca185 not found: ID does not exist" containerID="bd938aa057ee176d2433481c20fe69980cf193c32fd46e400794ff30d0fca185" Nov 25 09:50:18 crc kubenswrapper[4769]: I1125 09:50:18.308770 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bd938aa057ee176d2433481c20fe69980cf193c32fd46e400794ff30d0fca185"} err="failed to get container status \"bd938aa057ee176d2433481c20fe69980cf193c32fd46e400794ff30d0fca185\": rpc error: code = NotFound desc = could not find container \"bd938aa057ee176d2433481c20fe69980cf193c32fd46e400794ff30d0fca185\": container with ID starting with bd938aa057ee176d2433481c20fe69980cf193c32fd46e400794ff30d0fca185 not found: ID does not exist" Nov 25 09:50:20 crc kubenswrapper[4769]: I1125 09:50:20.244482 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4" path="/var/lib/kubelet/pods/2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4/volumes" Nov 25 09:50:21 crc kubenswrapper[4769]: I1125 09:50:21.796622 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-monitoring/metrics-server-5fddf85589-p5m9n" Nov 25 09:50:21 crc kubenswrapper[4769]: I1125 09:50:21.803923 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/metrics-server-5fddf85589-p5m9n" Nov 25 09:50:22 crc kubenswrapper[4769]: I1125 09:50:22.290913 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:50:22 crc kubenswrapper[4769]: I1125 09:50:22.291613 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:50:26 crc kubenswrapper[4769]: E1125 09:50:26.165743 4769 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfb4b7e91_71e7_4719_b0c5_35d132cf6115.slice\": RecentStats: unable to find data in memory cache]" Nov 25 09:50:30 crc kubenswrapper[4769]: E1125 09:50:30.886791 4769 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: 
[\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfb4b7e91_71e7_4719_b0c5_35d132cf6115.slice\": RecentStats: unable to find data in memory cache]" Nov 25 09:50:36 crc kubenswrapper[4769]: E1125 09:50:36.330145 4769 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfb4b7e91_71e7_4719_b0c5_35d132cf6115.slice\": RecentStats: unable to find data in memory cache]" Nov 25 09:50:42 crc kubenswrapper[4769]: I1125 09:50:42.894146 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:50:42 crc kubenswrapper[4769]: I1125 09:50:42.935722 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:50:43 crc kubenswrapper[4769]: I1125 09:50:43.501216 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/prometheus-k8s-0" Nov 25 09:50:46 crc kubenswrapper[4769]: E1125 09:50:46.009885 4769 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfb4b7e91_71e7_4719_b0c5_35d132cf6115.slice\": RecentStats: unable to find data in memory cache]" Nov 25 09:50:46 crc kubenswrapper[4769]: E1125 09:50:46.356433 4769 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfb4b7e91_71e7_4719_b0c5_35d132cf6115.slice\": RecentStats: unable to find data in memory cache]" Nov 25 09:50:48 crc kubenswrapper[4769]: E1125 09:50:48.108631 4769 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfb4b7e91_71e7_4719_b0c5_35d132cf6115.slice\": RecentStats: unable to find data in memory cache]" Nov 25 09:50:48 crc kubenswrapper[4769]: E1125 09:50:48.109089 4769 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfb4b7e91_71e7_4719_b0c5_35d132cf6115.slice\": RecentStats: unable to find data in memory cache]" Nov 25 09:50:52 crc kubenswrapper[4769]: I1125 09:50:52.290706 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:50:52 crc kubenswrapper[4769]: I1125 09:50:52.291204 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:50:55 crc kubenswrapper[4769]: I1125 09:50:55.816124 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-78789f5b5d-sxnbq"] Nov 25 09:50:55 crc kubenswrapper[4769]: E1125 09:50:55.817272 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4" containerName="console" Nov 25 09:50:55 crc kubenswrapper[4769]: I1125 
09:50:55.817287 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4" containerName="console" Nov 25 09:50:55 crc kubenswrapper[4769]: E1125 09:50:55.817308 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb4b7e91-71e7-4719-b0c5-35d132cf6115" containerName="registry" Nov 25 09:50:55 crc kubenswrapper[4769]: I1125 09:50:55.817316 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb4b7e91-71e7-4719-b0c5-35d132cf6115" containerName="registry" Nov 25 09:50:55 crc kubenswrapper[4769]: I1125 09:50:55.817448 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb4b7e91-71e7-4719-b0c5-35d132cf6115" containerName="registry" Nov 25 09:50:55 crc kubenswrapper[4769]: I1125 09:50:55.817479 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e9574b9-23c7-46ec-98e4-8d4b4a6d97a4" containerName="console" Nov 25 09:50:55 crc kubenswrapper[4769]: I1125 09:50:55.818029 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-78789f5b5d-sxnbq" Nov 25 09:50:55 crc kubenswrapper[4769]: I1125 09:50:55.836536 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-78789f5b5d-sxnbq"] Nov 25 09:50:55 crc kubenswrapper[4769]: I1125 09:50:55.980307 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812-oauth-serving-cert\") pod \"console-78789f5b5d-sxnbq\" (UID: \"62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812\") " pod="openshift-console/console-78789f5b5d-sxnbq" Nov 25 09:50:55 crc kubenswrapper[4769]: I1125 09:50:55.980362 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812-console-oauth-config\") pod \"console-78789f5b5d-sxnbq\" (UID: \"62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812\") " pod="openshift-console/console-78789f5b5d-sxnbq" Nov 25 09:50:55 crc kubenswrapper[4769]: I1125 09:50:55.980388 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812-console-serving-cert\") pod \"console-78789f5b5d-sxnbq\" (UID: \"62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812\") " pod="openshift-console/console-78789f5b5d-sxnbq" Nov 25 09:50:55 crc kubenswrapper[4769]: I1125 09:50:55.980427 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812-trusted-ca-bundle\") pod \"console-78789f5b5d-sxnbq\" (UID: \"62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812\") " pod="openshift-console/console-78789f5b5d-sxnbq" Nov 25 09:50:55 crc kubenswrapper[4769]: I1125 09:50:55.980455 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q8r4r\" (UniqueName: \"kubernetes.io/projected/62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812-kube-api-access-q8r4r\") pod \"console-78789f5b5d-sxnbq\" (UID: \"62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812\") " pod="openshift-console/console-78789f5b5d-sxnbq" Nov 25 09:50:55 crc kubenswrapper[4769]: I1125 09:50:55.980475 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: 
\"kubernetes.io/configmap/62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812-service-ca\") pod \"console-78789f5b5d-sxnbq\" (UID: \"62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812\") " pod="openshift-console/console-78789f5b5d-sxnbq" Nov 25 09:50:55 crc kubenswrapper[4769]: I1125 09:50:55.980495 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812-console-config\") pod \"console-78789f5b5d-sxnbq\" (UID: \"62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812\") " pod="openshift-console/console-78789f5b5d-sxnbq" Nov 25 09:50:56 crc kubenswrapper[4769]: I1125 09:50:56.082276 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812-oauth-serving-cert\") pod \"console-78789f5b5d-sxnbq\" (UID: \"62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812\") " pod="openshift-console/console-78789f5b5d-sxnbq" Nov 25 09:50:56 crc kubenswrapper[4769]: I1125 09:50:56.082686 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812-console-oauth-config\") pod \"console-78789f5b5d-sxnbq\" (UID: \"62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812\") " pod="openshift-console/console-78789f5b5d-sxnbq" Nov 25 09:50:56 crc kubenswrapper[4769]: I1125 09:50:56.082786 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812-console-serving-cert\") pod \"console-78789f5b5d-sxnbq\" (UID: \"62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812\") " pod="openshift-console/console-78789f5b5d-sxnbq" Nov 25 09:50:56 crc kubenswrapper[4769]: I1125 09:50:56.082907 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812-trusted-ca-bundle\") pod \"console-78789f5b5d-sxnbq\" (UID: \"62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812\") " pod="openshift-console/console-78789f5b5d-sxnbq" Nov 25 09:50:56 crc kubenswrapper[4769]: I1125 09:50:56.083071 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q8r4r\" (UniqueName: \"kubernetes.io/projected/62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812-kube-api-access-q8r4r\") pod \"console-78789f5b5d-sxnbq\" (UID: \"62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812\") " pod="openshift-console/console-78789f5b5d-sxnbq" Nov 25 09:50:56 crc kubenswrapper[4769]: I1125 09:50:56.083191 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812-service-ca\") pod \"console-78789f5b5d-sxnbq\" (UID: \"62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812\") " pod="openshift-console/console-78789f5b5d-sxnbq" Nov 25 09:50:56 crc kubenswrapper[4769]: I1125 09:50:56.083318 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812-console-config\") pod \"console-78789f5b5d-sxnbq\" (UID: \"62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812\") " pod="openshift-console/console-78789f5b5d-sxnbq" Nov 25 09:50:56 crc kubenswrapper[4769]: I1125 09:50:56.084588 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: 
\"kubernetes.io/configmap/62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812-console-config\") pod \"console-78789f5b5d-sxnbq\" (UID: \"62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812\") " pod="openshift-console/console-78789f5b5d-sxnbq" Nov 25 09:50:56 crc kubenswrapper[4769]: I1125 09:50:56.084594 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812-service-ca\") pod \"console-78789f5b5d-sxnbq\" (UID: \"62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812\") " pod="openshift-console/console-78789f5b5d-sxnbq" Nov 25 09:50:56 crc kubenswrapper[4769]: I1125 09:50:56.084871 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812-oauth-serving-cert\") pod \"console-78789f5b5d-sxnbq\" (UID: \"62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812\") " pod="openshift-console/console-78789f5b5d-sxnbq" Nov 25 09:50:56 crc kubenswrapper[4769]: I1125 09:50:56.085519 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812-trusted-ca-bundle\") pod \"console-78789f5b5d-sxnbq\" (UID: \"62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812\") " pod="openshift-console/console-78789f5b5d-sxnbq" Nov 25 09:50:56 crc kubenswrapper[4769]: I1125 09:50:56.090186 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812-console-serving-cert\") pod \"console-78789f5b5d-sxnbq\" (UID: \"62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812\") " pod="openshift-console/console-78789f5b5d-sxnbq" Nov 25 09:50:56 crc kubenswrapper[4769]: I1125 09:50:56.090825 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812-console-oauth-config\") pod \"console-78789f5b5d-sxnbq\" (UID: \"62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812\") " pod="openshift-console/console-78789f5b5d-sxnbq" Nov 25 09:50:56 crc kubenswrapper[4769]: I1125 09:50:56.101977 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q8r4r\" (UniqueName: \"kubernetes.io/projected/62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812-kube-api-access-q8r4r\") pod \"console-78789f5b5d-sxnbq\" (UID: \"62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812\") " pod="openshift-console/console-78789f5b5d-sxnbq" Nov 25 09:50:56 crc kubenswrapper[4769]: I1125 09:50:56.139938 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-78789f5b5d-sxnbq" Nov 25 09:50:56 crc kubenswrapper[4769]: I1125 09:50:56.359600 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-78789f5b5d-sxnbq"] Nov 25 09:50:56 crc kubenswrapper[4769]: E1125 09:50:56.553234 4769 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfb4b7e91_71e7_4719_b0c5_35d132cf6115.slice\": RecentStats: unable to find data in memory cache]" Nov 25 09:50:56 crc kubenswrapper[4769]: I1125 09:50:56.571282 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-78789f5b5d-sxnbq" event={"ID":"62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812","Type":"ContainerStarted","Data":"3b2b57e5b9fb9a88e4081fb58f7886a7c02655a9c722c3341e810984cd8955c7"} Nov 25 09:50:56 crc kubenswrapper[4769]: I1125 09:50:56.571361 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-78789f5b5d-sxnbq" event={"ID":"62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812","Type":"ContainerStarted","Data":"e75b5ad37959e4586a1be4258b76a969d83bcb4056dcf94902b5105193c8a356"} Nov 25 09:50:56 crc kubenswrapper[4769]: I1125 09:50:56.594568 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-78789f5b5d-sxnbq" podStartSLOduration=1.594544919 podStartE2EDuration="1.594544919s" podCreationTimestamp="2025-11-25 09:50:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:50:56.590244065 +0000 UTC m=+405.175216378" watchObservedRunningTime="2025-11-25 09:50:56.594544919 +0000 UTC m=+405.179517232" Nov 25 09:51:00 crc kubenswrapper[4769]: E1125 09:51:00.887719 4769 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfb4b7e91_71e7_4719_b0c5_35d132cf6115.slice\": RecentStats: unable to find data in memory cache]" Nov 25 09:51:06 crc kubenswrapper[4769]: I1125 09:51:06.141186 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-78789f5b5d-sxnbq" Nov 25 09:51:06 crc kubenswrapper[4769]: I1125 09:51:06.141672 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-78789f5b5d-sxnbq" Nov 25 09:51:06 crc kubenswrapper[4769]: I1125 09:51:06.146795 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-78789f5b5d-sxnbq" Nov 25 09:51:06 crc kubenswrapper[4769]: E1125 09:51:06.586525 4769 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfb4b7e91_71e7_4719_b0c5_35d132cf6115.slice\": RecentStats: unable to find data in memory cache]" Nov 25 09:51:06 crc kubenswrapper[4769]: I1125 09:51:06.651533 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-78789f5b5d-sxnbq" Nov 25 09:51:06 crc kubenswrapper[4769]: I1125 09:51:06.713372 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-c9c8696b8-vrp57"] Nov 25 09:51:22 crc kubenswrapper[4769]: I1125 09:51:22.290539 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon 
namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:51:22 crc kubenswrapper[4769]: I1125 09:51:22.291404 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:51:22 crc kubenswrapper[4769]: I1125 09:51:22.291467 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" Nov 25 09:51:22 crc kubenswrapper[4769]: I1125 09:51:22.292280 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b1cfbd0201a546442e0247c8093220790d373f3d77e8c2c87bca802822113519"} pod="openshift-machine-config-operator/machine-config-daemon-98mzt" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 09:51:22 crc kubenswrapper[4769]: I1125 09:51:22.292342 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" containerID="cri-o://b1cfbd0201a546442e0247c8093220790d373f3d77e8c2c87bca802822113519" gracePeriod=600 Nov 25 09:51:22 crc kubenswrapper[4769]: I1125 09:51:22.779197 4769 generic.go:334] "Generic (PLEG): container finished" podID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerID="b1cfbd0201a546442e0247c8093220790d373f3d77e8c2c87bca802822113519" exitCode=0 Nov 25 09:51:22 crc kubenswrapper[4769]: I1125 09:51:22.779290 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" event={"ID":"d58c71b5-5dc4-45c1-9b58-9740a35d2256","Type":"ContainerDied","Data":"b1cfbd0201a546442e0247c8093220790d373f3d77e8c2c87bca802822113519"} Nov 25 09:51:22 crc kubenswrapper[4769]: I1125 09:51:22.779744 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" event={"ID":"d58c71b5-5dc4-45c1-9b58-9740a35d2256","Type":"ContainerStarted","Data":"7240a68bdc93001883e181fdd6d0a4be2f87fda7024907954cc0af44e59b1c48"} Nov 25 09:51:22 crc kubenswrapper[4769]: I1125 09:51:22.779776 4769 scope.go:117] "RemoveContainer" containerID="6a47750ed21c017ef2cf17c7e8ce0676b8bd6a9fe2669c3d5adae247e557ea71" Nov 25 09:51:31 crc kubenswrapper[4769]: I1125 09:51:31.757158 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-c9c8696b8-vrp57" podUID="67c31f3a-4080-485f-b383-4998935c91c3" containerName="console" containerID="cri-o://85a72e051fea5120e6da6df173a280739d5379f25a4af76ca27ebea7195a6b8d" gracePeriod=15 Nov 25 09:51:32 crc kubenswrapper[4769]: I1125 09:51:32.097804 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-c9c8696b8-vrp57_67c31f3a-4080-485f-b383-4998935c91c3/console/0.log" Nov 25 09:51:32 crc kubenswrapper[4769]: I1125 09:51:32.098245 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-c9c8696b8-vrp57" Nov 25 09:51:32 crc kubenswrapper[4769]: I1125 09:51:32.224926 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/67c31f3a-4080-485f-b383-4998935c91c3-service-ca\") pod \"67c31f3a-4080-485f-b383-4998935c91c3\" (UID: \"67c31f3a-4080-485f-b383-4998935c91c3\") " Nov 25 09:51:32 crc kubenswrapper[4769]: I1125 09:51:32.225118 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/67c31f3a-4080-485f-b383-4998935c91c3-oauth-serving-cert\") pod \"67c31f3a-4080-485f-b383-4998935c91c3\" (UID: \"67c31f3a-4080-485f-b383-4998935c91c3\") " Nov 25 09:51:32 crc kubenswrapper[4769]: I1125 09:51:32.225161 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/67c31f3a-4080-485f-b383-4998935c91c3-console-serving-cert\") pod \"67c31f3a-4080-485f-b383-4998935c91c3\" (UID: \"67c31f3a-4080-485f-b383-4998935c91c3\") " Nov 25 09:51:32 crc kubenswrapper[4769]: I1125 09:51:32.225221 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/67c31f3a-4080-485f-b383-4998935c91c3-console-config\") pod \"67c31f3a-4080-485f-b383-4998935c91c3\" (UID: \"67c31f3a-4080-485f-b383-4998935c91c3\") " Nov 25 09:51:32 crc kubenswrapper[4769]: I1125 09:51:32.225279 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/67c31f3a-4080-485f-b383-4998935c91c3-console-oauth-config\") pod \"67c31f3a-4080-485f-b383-4998935c91c3\" (UID: \"67c31f3a-4080-485f-b383-4998935c91c3\") " Nov 25 09:51:32 crc kubenswrapper[4769]: I1125 09:51:32.225318 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/67c31f3a-4080-485f-b383-4998935c91c3-trusted-ca-bundle\") pod \"67c31f3a-4080-485f-b383-4998935c91c3\" (UID: \"67c31f3a-4080-485f-b383-4998935c91c3\") " Nov 25 09:51:32 crc kubenswrapper[4769]: I1125 09:51:32.225382 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ccnhr\" (UniqueName: \"kubernetes.io/projected/67c31f3a-4080-485f-b383-4998935c91c3-kube-api-access-ccnhr\") pod \"67c31f3a-4080-485f-b383-4998935c91c3\" (UID: \"67c31f3a-4080-485f-b383-4998935c91c3\") " Nov 25 09:51:32 crc kubenswrapper[4769]: I1125 09:51:32.226739 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/67c31f3a-4080-485f-b383-4998935c91c3-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "67c31f3a-4080-485f-b383-4998935c91c3" (UID: "67c31f3a-4080-485f-b383-4998935c91c3"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:51:32 crc kubenswrapper[4769]: I1125 09:51:32.226794 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/67c31f3a-4080-485f-b383-4998935c91c3-console-config" (OuterVolumeSpecName: "console-config") pod "67c31f3a-4080-485f-b383-4998935c91c3" (UID: "67c31f3a-4080-485f-b383-4998935c91c3"). InnerVolumeSpecName "console-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:51:32 crc kubenswrapper[4769]: I1125 09:51:32.226830 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/67c31f3a-4080-485f-b383-4998935c91c3-service-ca" (OuterVolumeSpecName: "service-ca") pod "67c31f3a-4080-485f-b383-4998935c91c3" (UID: "67c31f3a-4080-485f-b383-4998935c91c3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:51:32 crc kubenswrapper[4769]: I1125 09:51:32.227049 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/67c31f3a-4080-485f-b383-4998935c91c3-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "67c31f3a-4080-485f-b383-4998935c91c3" (UID: "67c31f3a-4080-485f-b383-4998935c91c3"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:51:32 crc kubenswrapper[4769]: I1125 09:51:32.231138 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/67c31f3a-4080-485f-b383-4998935c91c3-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "67c31f3a-4080-485f-b383-4998935c91c3" (UID: "67c31f3a-4080-485f-b383-4998935c91c3"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:51:32 crc kubenswrapper[4769]: I1125 09:51:32.231517 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/67c31f3a-4080-485f-b383-4998935c91c3-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "67c31f3a-4080-485f-b383-4998935c91c3" (UID: "67c31f3a-4080-485f-b383-4998935c91c3"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:51:32 crc kubenswrapper[4769]: I1125 09:51:32.232979 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/67c31f3a-4080-485f-b383-4998935c91c3-kube-api-access-ccnhr" (OuterVolumeSpecName: "kube-api-access-ccnhr") pod "67c31f3a-4080-485f-b383-4998935c91c3" (UID: "67c31f3a-4080-485f-b383-4998935c91c3"). InnerVolumeSpecName "kube-api-access-ccnhr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:51:32 crc kubenswrapper[4769]: I1125 09:51:32.328443 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ccnhr\" (UniqueName: \"kubernetes.io/projected/67c31f3a-4080-485f-b383-4998935c91c3-kube-api-access-ccnhr\") on node \"crc\" DevicePath \"\"" Nov 25 09:51:32 crc kubenswrapper[4769]: I1125 09:51:32.328500 4769 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/67c31f3a-4080-485f-b383-4998935c91c3-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:51:32 crc kubenswrapper[4769]: I1125 09:51:32.328514 4769 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/67c31f3a-4080-485f-b383-4998935c91c3-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:51:32 crc kubenswrapper[4769]: I1125 09:51:32.328525 4769 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/67c31f3a-4080-485f-b383-4998935c91c3-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:51:32 crc kubenswrapper[4769]: I1125 09:51:32.328537 4769 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/67c31f3a-4080-485f-b383-4998935c91c3-console-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:51:32 crc kubenswrapper[4769]: I1125 09:51:32.328547 4769 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/67c31f3a-4080-485f-b383-4998935c91c3-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:51:32 crc kubenswrapper[4769]: I1125 09:51:32.328557 4769 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/67c31f3a-4080-485f-b383-4998935c91c3-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:51:32 crc kubenswrapper[4769]: I1125 09:51:32.849860 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-c9c8696b8-vrp57_67c31f3a-4080-485f-b383-4998935c91c3/console/0.log" Nov 25 09:51:32 crc kubenswrapper[4769]: I1125 09:51:32.849933 4769 generic.go:334] "Generic (PLEG): container finished" podID="67c31f3a-4080-485f-b383-4998935c91c3" containerID="85a72e051fea5120e6da6df173a280739d5379f25a4af76ca27ebea7195a6b8d" exitCode=2 Nov 25 09:51:32 crc kubenswrapper[4769]: I1125 09:51:32.850006 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-c9c8696b8-vrp57" event={"ID":"67c31f3a-4080-485f-b383-4998935c91c3","Type":"ContainerDied","Data":"85a72e051fea5120e6da6df173a280739d5379f25a4af76ca27ebea7195a6b8d"} Nov 25 09:51:32 crc kubenswrapper[4769]: I1125 09:51:32.850048 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-c9c8696b8-vrp57" event={"ID":"67c31f3a-4080-485f-b383-4998935c91c3","Type":"ContainerDied","Data":"d196ab059a2013e7a7f3bb2f6d46d7ac86308269b28e86c6b8dbaa69e4604b23"} Nov 25 09:51:32 crc kubenswrapper[4769]: I1125 09:51:32.850070 4769 scope.go:117] "RemoveContainer" containerID="85a72e051fea5120e6da6df173a280739d5379f25a4af76ca27ebea7195a6b8d" Nov 25 09:51:32 crc kubenswrapper[4769]: I1125 09:51:32.850070 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-c9c8696b8-vrp57" Nov 25 09:51:32 crc kubenswrapper[4769]: I1125 09:51:32.871232 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-c9c8696b8-vrp57"] Nov 25 09:51:32 crc kubenswrapper[4769]: I1125 09:51:32.874591 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-c9c8696b8-vrp57"] Nov 25 09:51:32 crc kubenswrapper[4769]: I1125 09:51:32.877569 4769 scope.go:117] "RemoveContainer" containerID="85a72e051fea5120e6da6df173a280739d5379f25a4af76ca27ebea7195a6b8d" Nov 25 09:51:32 crc kubenswrapper[4769]: E1125 09:51:32.878090 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"85a72e051fea5120e6da6df173a280739d5379f25a4af76ca27ebea7195a6b8d\": container with ID starting with 85a72e051fea5120e6da6df173a280739d5379f25a4af76ca27ebea7195a6b8d not found: ID does not exist" containerID="85a72e051fea5120e6da6df173a280739d5379f25a4af76ca27ebea7195a6b8d" Nov 25 09:51:32 crc kubenswrapper[4769]: I1125 09:51:32.878140 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"85a72e051fea5120e6da6df173a280739d5379f25a4af76ca27ebea7195a6b8d"} err="failed to get container status \"85a72e051fea5120e6da6df173a280739d5379f25a4af76ca27ebea7195a6b8d\": rpc error: code = NotFound desc = could not find container \"85a72e051fea5120e6da6df173a280739d5379f25a4af76ca27ebea7195a6b8d\": container with ID starting with 85a72e051fea5120e6da6df173a280739d5379f25a4af76ca27ebea7195a6b8d not found: ID does not exist" Nov 25 09:51:34 crc kubenswrapper[4769]: I1125 09:51:34.247184 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="67c31f3a-4080-485f-b383-4998935c91c3" path="/var/lib/kubelet/pods/67c31f3a-4080-485f-b383-4998935c91c3/volumes" Nov 25 09:53:22 crc kubenswrapper[4769]: I1125 09:53:22.290744 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:53:22 crc kubenswrapper[4769]: I1125 09:53:22.292196 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:53:52 crc kubenswrapper[4769]: I1125 09:53:52.290857 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:53:52 crc kubenswrapper[4769]: I1125 09:53:52.291682 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:54:22 crc kubenswrapper[4769]: I1125 09:54:22.290901 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon 
namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:54:22 crc kubenswrapper[4769]: I1125 09:54:22.291763 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:54:22 crc kubenswrapper[4769]: I1125 09:54:22.291830 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" Nov 25 09:54:22 crc kubenswrapper[4769]: I1125 09:54:22.292543 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7240a68bdc93001883e181fdd6d0a4be2f87fda7024907954cc0af44e59b1c48"} pod="openshift-machine-config-operator/machine-config-daemon-98mzt" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 09:54:22 crc kubenswrapper[4769]: I1125 09:54:22.292654 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" containerID="cri-o://7240a68bdc93001883e181fdd6d0a4be2f87fda7024907954cc0af44e59b1c48" gracePeriod=600 Nov 25 09:54:23 crc kubenswrapper[4769]: I1125 09:54:23.050449 4769 generic.go:334] "Generic (PLEG): container finished" podID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerID="7240a68bdc93001883e181fdd6d0a4be2f87fda7024907954cc0af44e59b1c48" exitCode=0 Nov 25 09:54:23 crc kubenswrapper[4769]: I1125 09:54:23.050535 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" event={"ID":"d58c71b5-5dc4-45c1-9b58-9740a35d2256","Type":"ContainerDied","Data":"7240a68bdc93001883e181fdd6d0a4be2f87fda7024907954cc0af44e59b1c48"} Nov 25 09:54:23 crc kubenswrapper[4769]: I1125 09:54:23.051467 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" event={"ID":"d58c71b5-5dc4-45c1-9b58-9740a35d2256","Type":"ContainerStarted","Data":"2b789aba899603f1a14199b861fe977351ed8d7e8ef00a45b850160445e4d04d"} Nov 25 09:54:23 crc kubenswrapper[4769]: I1125 09:54:23.051515 4769 scope.go:117] "RemoveContainer" containerID="b1cfbd0201a546442e0247c8093220790d373f3d77e8c2c87bca802822113519" Nov 25 09:54:33 crc kubenswrapper[4769]: I1125 09:54:33.339800 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210dfwqr"] Nov 25 09:54:33 crc kubenswrapper[4769]: E1125 09:54:33.340882 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67c31f3a-4080-485f-b383-4998935c91c3" containerName="console" Nov 25 09:54:33 crc kubenswrapper[4769]: I1125 09:54:33.340900 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="67c31f3a-4080-485f-b383-4998935c91c3" containerName="console" Nov 25 09:54:33 crc kubenswrapper[4769]: I1125 09:54:33.341154 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="67c31f3a-4080-485f-b383-4998935c91c3" containerName="console" Nov 25 09:54:33 crc kubenswrapper[4769]: 
I1125 09:54:33.342483 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210dfwqr" Nov 25 09:54:33 crc kubenswrapper[4769]: I1125 09:54:33.344954 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 25 09:54:33 crc kubenswrapper[4769]: I1125 09:54:33.353148 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210dfwqr"] Nov 25 09:54:33 crc kubenswrapper[4769]: I1125 09:54:33.494296 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gw2xc\" (UniqueName: \"kubernetes.io/projected/4c6c1e32-59b2-4376-950f-077aee09fff0-kube-api-access-gw2xc\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210dfwqr\" (UID: \"4c6c1e32-59b2-4376-950f-077aee09fff0\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210dfwqr" Nov 25 09:54:33 crc kubenswrapper[4769]: I1125 09:54:33.494363 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4c6c1e32-59b2-4376-950f-077aee09fff0-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210dfwqr\" (UID: \"4c6c1e32-59b2-4376-950f-077aee09fff0\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210dfwqr" Nov 25 09:54:33 crc kubenswrapper[4769]: I1125 09:54:33.494401 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4c6c1e32-59b2-4376-950f-077aee09fff0-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210dfwqr\" (UID: \"4c6c1e32-59b2-4376-950f-077aee09fff0\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210dfwqr" Nov 25 09:54:33 crc kubenswrapper[4769]: I1125 09:54:33.596208 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4c6c1e32-59b2-4376-950f-077aee09fff0-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210dfwqr\" (UID: \"4c6c1e32-59b2-4376-950f-077aee09fff0\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210dfwqr" Nov 25 09:54:33 crc kubenswrapper[4769]: I1125 09:54:33.596347 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4c6c1e32-59b2-4376-950f-077aee09fff0-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210dfwqr\" (UID: \"4c6c1e32-59b2-4376-950f-077aee09fff0\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210dfwqr" Nov 25 09:54:33 crc kubenswrapper[4769]: I1125 09:54:33.596523 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gw2xc\" (UniqueName: \"kubernetes.io/projected/4c6c1e32-59b2-4376-950f-077aee09fff0-kube-api-access-gw2xc\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210dfwqr\" (UID: \"4c6c1e32-59b2-4376-950f-077aee09fff0\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210dfwqr" Nov 25 09:54:33 crc kubenswrapper[4769]: I1125 09:54:33.596702 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4c6c1e32-59b2-4376-950f-077aee09fff0-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210dfwqr\" (UID: \"4c6c1e32-59b2-4376-950f-077aee09fff0\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210dfwqr" Nov 25 09:54:33 crc kubenswrapper[4769]: I1125 09:54:33.596861 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4c6c1e32-59b2-4376-950f-077aee09fff0-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210dfwqr\" (UID: \"4c6c1e32-59b2-4376-950f-077aee09fff0\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210dfwqr" Nov 25 09:54:33 crc kubenswrapper[4769]: I1125 09:54:33.616921 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gw2xc\" (UniqueName: \"kubernetes.io/projected/4c6c1e32-59b2-4376-950f-077aee09fff0-kube-api-access-gw2xc\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210dfwqr\" (UID: \"4c6c1e32-59b2-4376-950f-077aee09fff0\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210dfwqr" Nov 25 09:54:33 crc kubenswrapper[4769]: I1125 09:54:33.659805 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210dfwqr" Nov 25 09:54:33 crc kubenswrapper[4769]: I1125 09:54:33.854912 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210dfwqr"] Nov 25 09:54:34 crc kubenswrapper[4769]: I1125 09:54:34.147167 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210dfwqr" event={"ID":"4c6c1e32-59b2-4376-950f-077aee09fff0","Type":"ContainerStarted","Data":"356af3023c49f92e5ed233d7601c3b43dd1bfb1fa0ca4fd3fe4a48fdec9749cc"} Nov 25 09:54:35 crc kubenswrapper[4769]: I1125 09:54:35.156357 4769 generic.go:334] "Generic (PLEG): container finished" podID="4c6c1e32-59b2-4376-950f-077aee09fff0" containerID="02ae55bcdb7bf04be5ed0c4bc5512f340afa968d9ca8d07273817d49b2efda92" exitCode=0 Nov 25 09:54:35 crc kubenswrapper[4769]: I1125 09:54:35.156437 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210dfwqr" event={"ID":"4c6c1e32-59b2-4376-950f-077aee09fff0","Type":"ContainerDied","Data":"02ae55bcdb7bf04be5ed0c4bc5512f340afa968d9ca8d07273817d49b2efda92"} Nov 25 09:54:35 crc kubenswrapper[4769]: I1125 09:54:35.159598 4769 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 09:54:37 crc kubenswrapper[4769]: I1125 09:54:37.170156 4769 generic.go:334] "Generic (PLEG): container finished" podID="4c6c1e32-59b2-4376-950f-077aee09fff0" containerID="374b467c8447c5aeddd5b3ef9ca5b21d68fffa2e40aed8d105f8c169b6896e85" exitCode=0 Nov 25 09:54:37 crc kubenswrapper[4769]: I1125 09:54:37.170233 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210dfwqr" event={"ID":"4c6c1e32-59b2-4376-950f-077aee09fff0","Type":"ContainerDied","Data":"374b467c8447c5aeddd5b3ef9ca5b21d68fffa2e40aed8d105f8c169b6896e85"} Nov 25 09:54:38 crc kubenswrapper[4769]: I1125 09:54:38.182122 4769 generic.go:334] "Generic (PLEG): container 
finished" podID="4c6c1e32-59b2-4376-950f-077aee09fff0" containerID="a2478cbc514b563b1b11bb7f2b968702f09ee552e20690a5e3acb38c912c2a2b" exitCode=0 Nov 25 09:54:38 crc kubenswrapper[4769]: I1125 09:54:38.182197 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210dfwqr" event={"ID":"4c6c1e32-59b2-4376-950f-077aee09fff0","Type":"ContainerDied","Data":"a2478cbc514b563b1b11bb7f2b968702f09ee552e20690a5e3acb38c912c2a2b"} Nov 25 09:54:39 crc kubenswrapper[4769]: I1125 09:54:39.449146 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210dfwqr" Nov 25 09:54:39 crc kubenswrapper[4769]: I1125 09:54:39.599588 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gw2xc\" (UniqueName: \"kubernetes.io/projected/4c6c1e32-59b2-4376-950f-077aee09fff0-kube-api-access-gw2xc\") pod \"4c6c1e32-59b2-4376-950f-077aee09fff0\" (UID: \"4c6c1e32-59b2-4376-950f-077aee09fff0\") " Nov 25 09:54:39 crc kubenswrapper[4769]: I1125 09:54:39.599651 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4c6c1e32-59b2-4376-950f-077aee09fff0-util\") pod \"4c6c1e32-59b2-4376-950f-077aee09fff0\" (UID: \"4c6c1e32-59b2-4376-950f-077aee09fff0\") " Nov 25 09:54:39 crc kubenswrapper[4769]: I1125 09:54:39.599687 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4c6c1e32-59b2-4376-950f-077aee09fff0-bundle\") pod \"4c6c1e32-59b2-4376-950f-077aee09fff0\" (UID: \"4c6c1e32-59b2-4376-950f-077aee09fff0\") " Nov 25 09:54:39 crc kubenswrapper[4769]: I1125 09:54:39.602247 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c6c1e32-59b2-4376-950f-077aee09fff0-bundle" (OuterVolumeSpecName: "bundle") pod "4c6c1e32-59b2-4376-950f-077aee09fff0" (UID: "4c6c1e32-59b2-4376-950f-077aee09fff0"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:54:39 crc kubenswrapper[4769]: I1125 09:54:39.608249 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c6c1e32-59b2-4376-950f-077aee09fff0-kube-api-access-gw2xc" (OuterVolumeSpecName: "kube-api-access-gw2xc") pod "4c6c1e32-59b2-4376-950f-077aee09fff0" (UID: "4c6c1e32-59b2-4376-950f-077aee09fff0"). InnerVolumeSpecName "kube-api-access-gw2xc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:54:39 crc kubenswrapper[4769]: I1125 09:54:39.702346 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gw2xc\" (UniqueName: \"kubernetes.io/projected/4c6c1e32-59b2-4376-950f-077aee09fff0-kube-api-access-gw2xc\") on node \"crc\" DevicePath \"\"" Nov 25 09:54:39 crc kubenswrapper[4769]: I1125 09:54:39.702432 4769 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4c6c1e32-59b2-4376-950f-077aee09fff0-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:54:39 crc kubenswrapper[4769]: I1125 09:54:39.886884 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c6c1e32-59b2-4376-950f-077aee09fff0-util" (OuterVolumeSpecName: "util") pod "4c6c1e32-59b2-4376-950f-077aee09fff0" (UID: "4c6c1e32-59b2-4376-950f-077aee09fff0"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:54:39 crc kubenswrapper[4769]: I1125 09:54:39.907142 4769 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4c6c1e32-59b2-4376-950f-077aee09fff0-util\") on node \"crc\" DevicePath \"\"" Nov 25 09:54:40 crc kubenswrapper[4769]: I1125 09:54:40.204406 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210dfwqr" event={"ID":"4c6c1e32-59b2-4376-950f-077aee09fff0","Type":"ContainerDied","Data":"356af3023c49f92e5ed233d7601c3b43dd1bfb1fa0ca4fd3fe4a48fdec9749cc"} Nov 25 09:54:40 crc kubenswrapper[4769]: I1125 09:54:40.204477 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="356af3023c49f92e5ed233d7601c3b43dd1bfb1fa0ca4fd3fe4a48fdec9749cc" Nov 25 09:54:40 crc kubenswrapper[4769]: I1125 09:54:40.204545 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210dfwqr" Nov 25 09:54:44 crc kubenswrapper[4769]: I1125 09:54:44.491504 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-kfvzs"] Nov 25 09:54:44 crc kubenswrapper[4769]: I1125 09:54:44.494170 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="ovn-controller" containerID="cri-o://ab25f2ae721ae8616e5ef712256055c8f8a353d898a7f2711938634d2d9fbbe9" gracePeriod=30 Nov 25 09:54:44 crc kubenswrapper[4769]: I1125 09:54:44.494305 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="sbdb" containerID="cri-o://010a6e6fa299b12043210d61b4ac16d7d1f4d948888065c8a9e723060e2be2b6" gracePeriod=30 Nov 25 09:54:44 crc kubenswrapper[4769]: I1125 09:54:44.494397 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://a41ce53b7a0ecd75b9205774e514ba01c6852f939cff8e96fa49105f4e600702" gracePeriod=30 Nov 25 09:54:44 crc kubenswrapper[4769]: I1125 09:54:44.494346 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="nbdb" containerID="cri-o://0893245e50828fc44a5a9f575584f675dfd48a67e31fb78734b575e30af769ae" gracePeriod=30 Nov 25 09:54:44 crc kubenswrapper[4769]: I1125 09:54:44.494356 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="kube-rbac-proxy-node" containerID="cri-o://82259eae43bbd7b738e4f7fef449b797efc4d557417eb01b61a9ff1e6d59d07b" gracePeriod=30 Nov 25 09:54:44 crc kubenswrapper[4769]: I1125 09:54:44.494396 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="ovn-acl-logging" containerID="cri-o://7459078ef54cd1bb3171908596cdc66735a96463eafbc5a0ca21e644cd934e1a" gracePeriod=30 Nov 25 09:54:44 crc kubenswrapper[4769]: I1125 09:54:44.494279 
Nov 25 09:54:40 crc kubenswrapper[4769]: I1125 09:54:40.204406 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210dfwqr" event={"ID":"4c6c1e32-59b2-4376-950f-077aee09fff0","Type":"ContainerDied","Data":"356af3023c49f92e5ed233d7601c3b43dd1bfb1fa0ca4fd3fe4a48fdec9749cc"}
Nov 25 09:54:40 crc kubenswrapper[4769]: I1125 09:54:40.204477 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="356af3023c49f92e5ed233d7601c3b43dd1bfb1fa0ca4fd3fe4a48fdec9749cc"
Nov 25 09:54:40 crc kubenswrapper[4769]: I1125 09:54:40.204545 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210dfwqr"
Nov 25 09:54:44 crc kubenswrapper[4769]: I1125 09:54:44.491504 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-kfvzs"]
Nov 25 09:54:44 crc kubenswrapper[4769]: I1125 09:54:44.494170 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="ovn-controller" containerID="cri-o://ab25f2ae721ae8616e5ef712256055c8f8a353d898a7f2711938634d2d9fbbe9" gracePeriod=30
Nov 25 09:54:44 crc kubenswrapper[4769]: I1125 09:54:44.494305 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="sbdb" containerID="cri-o://010a6e6fa299b12043210d61b4ac16d7d1f4d948888065c8a9e723060e2be2b6" gracePeriod=30
Nov 25 09:54:44 crc kubenswrapper[4769]: I1125 09:54:44.494397 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://a41ce53b7a0ecd75b9205774e514ba01c6852f939cff8e96fa49105f4e600702" gracePeriod=30
Nov 25 09:54:44 crc kubenswrapper[4769]: I1125 09:54:44.494346 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="nbdb" containerID="cri-o://0893245e50828fc44a5a9f575584f675dfd48a67e31fb78734b575e30af769ae" gracePeriod=30
Nov 25 09:54:44 crc kubenswrapper[4769]: I1125 09:54:44.494356 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="kube-rbac-proxy-node" containerID="cri-o://82259eae43bbd7b738e4f7fef449b797efc4d557417eb01b61a9ff1e6d59d07b" gracePeriod=30
Nov 25 09:54:44 crc kubenswrapper[4769]: I1125 09:54:44.494396 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="ovn-acl-logging" containerID="cri-o://7459078ef54cd1bb3171908596cdc66735a96463eafbc5a0ca21e644cd934e1a" gracePeriod=30
Nov 25 09:54:44 crc kubenswrapper[4769]: I1125 09:54:44.494279 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="northd" containerID="cri-o://79aa5dc74b62d0d36d744a8603484ce1a5199bba790a406ffa70f76b70592e51" gracePeriod=30
Nov 25 09:54:44 crc kubenswrapper[4769]: I1125 09:54:44.548616 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="ovnkube-controller" containerID="cri-o://7f68699e162f5cdc55b3885a3e3dd00c7ef78c394d3aff1ad154563d9d00fe98" gracePeriod=30
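[Editor's note] The eight "Killing container with a grace period" entries above all carry gracePeriod=30: the runtime sends SIGTERM and escalates to SIGKILL only if the process outlives the grace period. Exit code 143 = 128 + 15 (SIGTERM), which is what ovn-controller and ovn-acl-logging report shortly below; the containers that shut down cleanly report exitCode=0. A minimal, self-contained Go sketch of that pattern (not kubelet or CRI-O code):

    // Hypothetical sketch of a grace-period kill: SIGTERM first, SIGKILL
    // once the grace period expires.
    package main

    import (
        "fmt"
        "os/exec"
        "syscall"
        "time"
    )

    func killWithGrace(cmd *exec.Cmd, grace time.Duration) {
        done := make(chan error, 1)
        go func() { done <- cmd.Wait() }()
        cmd.Process.Signal(syscall.SIGTERM) // polite request to exit (-> exit code 143 if fatal)
        select {
        case <-done:
            fmt.Println("process exited within the grace period")
        case <-time.After(grace):
            fmt.Printf("grace period %s expired, sending SIGKILL\n", grace)
            cmd.Process.Kill()
            <-done
        }
    }

    func main() {
        cmd := exec.Command("sleep", "300")
        if err := cmd.Start(); err != nil {
            panic(err)
        }
        killWithGrace(cmd, 2*time.Second) // the kubelet entries above used gracePeriod=30
    }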
containerID="010a6e6fa299b12043210d61b4ac16d7d1f4d948888065c8a9e723060e2be2b6" exitCode=0 Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.249441 4769 generic.go:334] "Generic (PLEG): container finished" podID="069c06c6-fe60-41d0-b96d-86606f55b258" containerID="0893245e50828fc44a5a9f575584f675dfd48a67e31fb78734b575e30af769ae" exitCode=0 Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.249455 4769 generic.go:334] "Generic (PLEG): container finished" podID="069c06c6-fe60-41d0-b96d-86606f55b258" containerID="79aa5dc74b62d0d36d744a8603484ce1a5199bba790a406ffa70f76b70592e51" exitCode=0 Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.249458 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" event={"ID":"069c06c6-fe60-41d0-b96d-86606f55b258","Type":"ContainerDied","Data":"7f68699e162f5cdc55b3885a3e3dd00c7ef78c394d3aff1ad154563d9d00fe98"} Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.249500 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" event={"ID":"069c06c6-fe60-41d0-b96d-86606f55b258","Type":"ContainerDied","Data":"010a6e6fa299b12043210d61b4ac16d7d1f4d948888065c8a9e723060e2be2b6"} Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.249513 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" event={"ID":"069c06c6-fe60-41d0-b96d-86606f55b258","Type":"ContainerDied","Data":"0893245e50828fc44a5a9f575584f675dfd48a67e31fb78734b575e30af769ae"} Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.249522 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" event={"ID":"069c06c6-fe60-41d0-b96d-86606f55b258","Type":"ContainerDied","Data":"79aa5dc74b62d0d36d744a8603484ce1a5199bba790a406ffa70f76b70592e51"} Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.249532 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" event={"ID":"069c06c6-fe60-41d0-b96d-86606f55b258","Type":"ContainerDied","Data":"7459078ef54cd1bb3171908596cdc66735a96463eafbc5a0ca21e644cd934e1a"} Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.249466 4769 generic.go:334] "Generic (PLEG): container finished" podID="069c06c6-fe60-41d0-b96d-86606f55b258" containerID="7459078ef54cd1bb3171908596cdc66735a96463eafbc5a0ca21e644cd934e1a" exitCode=143 Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.249550 4769 generic.go:334] "Generic (PLEG): container finished" podID="069c06c6-fe60-41d0-b96d-86606f55b258" containerID="ab25f2ae721ae8616e5ef712256055c8f8a353d898a7f2711938634d2d9fbbe9" exitCode=143 Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.249566 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" event={"ID":"069c06c6-fe60-41d0-b96d-86606f55b258","Type":"ContainerDied","Data":"ab25f2ae721ae8616e5ef712256055c8f8a353d898a7f2711938634d2d9fbbe9"} Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.295902 4769 scope.go:117] "RemoveContainer" containerID="c5d5017da58d670189a8129172ef3a6d333e47c6ef6cf78ca9f1392dec54530f" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.722669 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-kfvzs_069c06c6-fe60-41d0-b96d-86606f55b258/ovn-acl-logging/0.log" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.723877 4769 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-kfvzs_069c06c6-fe60-41d0-b96d-86606f55b258/ovn-controller/0.log" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.724376 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.804392 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-5bk62"] Nov 25 09:54:45 crc kubenswrapper[4769]: E1125 09:54:45.804659 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="kube-rbac-proxy-node" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.804677 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="kube-rbac-proxy-node" Nov 25 09:54:45 crc kubenswrapper[4769]: E1125 09:54:45.804689 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="ovnkube-controller" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.804721 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="ovnkube-controller" Nov 25 09:54:45 crc kubenswrapper[4769]: E1125 09:54:45.804735 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="ovnkube-controller" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.804745 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="ovnkube-controller" Nov 25 09:54:45 crc kubenswrapper[4769]: E1125 09:54:45.804760 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="nbdb" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.804768 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="nbdb" Nov 25 09:54:45 crc kubenswrapper[4769]: E1125 09:54:45.804783 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="kubecfg-setup" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.804791 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="kubecfg-setup" Nov 25 09:54:45 crc kubenswrapper[4769]: E1125 09:54:45.804800 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="northd" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.804807 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="northd" Nov 25 09:54:45 crc kubenswrapper[4769]: E1125 09:54:45.804820 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="sbdb" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.804828 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="sbdb" Nov 25 09:54:45 crc kubenswrapper[4769]: E1125 09:54:45.804840 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c6c1e32-59b2-4376-950f-077aee09fff0" containerName="pull" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.804848 4769 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="4c6c1e32-59b2-4376-950f-077aee09fff0" containerName="pull" Nov 25 09:54:45 crc kubenswrapper[4769]: E1125 09:54:45.804864 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="ovn-controller" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.804872 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="ovn-controller" Nov 25 09:54:45 crc kubenswrapper[4769]: E1125 09:54:45.804890 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="ovn-acl-logging" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.804897 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="ovn-acl-logging" Nov 25 09:54:45 crc kubenswrapper[4769]: E1125 09:54:45.804909 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c6c1e32-59b2-4376-950f-077aee09fff0" containerName="util" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.804916 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c6c1e32-59b2-4376-950f-077aee09fff0" containerName="util" Nov 25 09:54:45 crc kubenswrapper[4769]: E1125 09:54:45.804930 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="kube-rbac-proxy-ovn-metrics" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.804938 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="kube-rbac-proxy-ovn-metrics" Nov 25 09:54:45 crc kubenswrapper[4769]: E1125 09:54:45.804951 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="ovnkube-controller" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.804980 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="ovnkube-controller" Nov 25 09:54:45 crc kubenswrapper[4769]: E1125 09:54:45.804993 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c6c1e32-59b2-4376-950f-077aee09fff0" containerName="extract" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.805001 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c6c1e32-59b2-4376-950f-077aee09fff0" containerName="extract" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.805142 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="ovn-acl-logging" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.805158 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="northd" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.805173 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="kube-rbac-proxy-ovn-metrics" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.805182 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="kube-rbac-proxy-node" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.805192 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="ovnkube-controller" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.805201 4769 
memory_manager.go:354] "RemoveStaleState removing state" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="nbdb" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.805211 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="sbdb" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.805219 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="ovnkube-controller" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.805229 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="ovn-controller" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.805240 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="ovnkube-controller" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.805250 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="ovnkube-controller" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.805265 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c6c1e32-59b2-4376-950f-077aee09fff0" containerName="extract" Nov 25 09:54:45 crc kubenswrapper[4769]: E1125 09:54:45.805405 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="ovnkube-controller" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.805417 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="ovnkube-controller" Nov 25 09:54:45 crc kubenswrapper[4769]: E1125 09:54:45.805428 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="ovnkube-controller" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.805435 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="ovnkube-controller" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.805567 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" containerName="ovnkube-controller" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.806366 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9wsqq\" (UniqueName: \"kubernetes.io/projected/069c06c6-fe60-41d0-b96d-86606f55b258-kube-api-access-9wsqq\") pod \"069c06c6-fe60-41d0-b96d-86606f55b258\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.806484 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-run-systemd\") pod \"069c06c6-fe60-41d0-b96d-86606f55b258\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.806595 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "069c06c6-fe60-41d0-b96d-86606f55b258" (UID: "069c06c6-fe60-41d0-b96d-86606f55b258"). InnerVolumeSpecName "run-ovn". 
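[Editor's note] The RemoveStaleState sweep above shows the CPU and memory managers dropping per-container resource assignments for pods that are no longer active, before the replacement pod is admitted. A Go sketch of that cleanup with invented types (not kubelet's cpu_manager/state_mem code):

    // Illustrative stale-state sweep: state is keyed by (podUID, containerName)
    // and entries whose pod is gone are deleted.
    package main

    import "fmt"

    type assignmentKey struct{ podUID, container string }

    func removeStaleState(cpuSets map[assignmentKey][]int, activePods map[string]bool) {
        for k := range cpuSets { // deleting while ranging over a map is safe in Go
            if !activePods[k.podUID] {
                fmt.Printf("RemoveStaleState: removing container %q of pod %q\n", k.container, k.podUID)
                delete(cpuSets, k) // the "Deleted CPUSet assignment" step
            }
        }
    }

    func main() {
        state := map[assignmentKey][]int{
            {podUID: "069c06c6", container: "northd"}: {2, 3},
            {podUID: "069c06c6", container: "sbdb"}:   {4, 5},
        }
        removeStaleState(state, map[string]bool{"0c939ff9": true})
        fmt.Println("assignments left:", len(state))
    }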
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.806715 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-run-ovn\") pod \"069c06c6-fe60-41d0-b96d-86606f55b258\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.806819 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/069c06c6-fe60-41d0-b96d-86606f55b258-ovn-node-metrics-cert\") pod \"069c06c6-fe60-41d0-b96d-86606f55b258\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.806899 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-host-var-lib-cni-networks-ovn-kubernetes\") pod \"069c06c6-fe60-41d0-b96d-86606f55b258\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.806929 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/069c06c6-fe60-41d0-b96d-86606f55b258-ovnkube-config\") pod \"069c06c6-fe60-41d0-b96d-86606f55b258\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.806953 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-host-cni-netd\") pod \"069c06c6-fe60-41d0-b96d-86606f55b258\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.807000 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-node-log\") pod \"069c06c6-fe60-41d0-b96d-86606f55b258\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.807035 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-host-kubelet\") pod \"069c06c6-fe60-41d0-b96d-86606f55b258\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.807067 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "069c06c6-fe60-41d0-b96d-86606f55b258" (UID: "069c06c6-fe60-41d0-b96d-86606f55b258"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.807099 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "069c06c6-fe60-41d0-b96d-86606f55b258" (UID: "069c06c6-fe60-41d0-b96d-86606f55b258"). InnerVolumeSpecName "host-cni-netd". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.807090 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-host-cni-bin\") pod \"069c06c6-fe60-41d0-b96d-86606f55b258\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.807125 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-node-log" (OuterVolumeSpecName: "node-log") pod "069c06c6-fe60-41d0-b96d-86606f55b258" (UID: "069c06c6-fe60-41d0-b96d-86606f55b258"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.807153 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "069c06c6-fe60-41d0-b96d-86606f55b258" (UID: "069c06c6-fe60-41d0-b96d-86606f55b258"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.807184 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "069c06c6-fe60-41d0-b96d-86606f55b258" (UID: "069c06c6-fe60-41d0-b96d-86606f55b258"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.807224 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-host-slash\") pod \"069c06c6-fe60-41d0-b96d-86606f55b258\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.807247 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-run-openvswitch\") pod \"069c06c6-fe60-41d0-b96d-86606f55b258\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.807277 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-log-socket\") pod \"069c06c6-fe60-41d0-b96d-86606f55b258\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.807310 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/069c06c6-fe60-41d0-b96d-86606f55b258-ovnkube-script-lib\") pod \"069c06c6-fe60-41d0-b96d-86606f55b258\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.807334 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-etc-openvswitch\") pod \"069c06c6-fe60-41d0-b96d-86606f55b258\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.807350 4769 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/069c06c6-fe60-41d0-b96d-86606f55b258-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "069c06c6-fe60-41d0-b96d-86606f55b258" (UID: "069c06c6-fe60-41d0-b96d-86606f55b258"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.807359 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-var-lib-openvswitch\") pod \"069c06c6-fe60-41d0-b96d-86606f55b258\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.807426 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/069c06c6-fe60-41d0-b96d-86606f55b258-env-overrides\") pod \"069c06c6-fe60-41d0-b96d-86606f55b258\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.807380 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "069c06c6-fe60-41d0-b96d-86606f55b258" (UID: "069c06c6-fe60-41d0-b96d-86606f55b258"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.807397 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-host-slash" (OuterVolumeSpecName: "host-slash") pod "069c06c6-fe60-41d0-b96d-86606f55b258" (UID: "069c06c6-fe60-41d0-b96d-86606f55b258"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.807409 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "069c06c6-fe60-41d0-b96d-86606f55b258" (UID: "069c06c6-fe60-41d0-b96d-86606f55b258"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.807425 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-log-socket" (OuterVolumeSpecName: "log-socket") pod "069c06c6-fe60-41d0-b96d-86606f55b258" (UID: "069c06c6-fe60-41d0-b96d-86606f55b258"). InnerVolumeSpecName "log-socket". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.807476 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-systemd-units\") pod \"069c06c6-fe60-41d0-b96d-86606f55b258\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.807510 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-host-run-netns\") pod \"069c06c6-fe60-41d0-b96d-86606f55b258\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.807513 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "069c06c6-fe60-41d0-b96d-86606f55b258" (UID: "069c06c6-fe60-41d0-b96d-86606f55b258"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.807540 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-host-run-ovn-kubernetes\") pod \"069c06c6-fe60-41d0-b96d-86606f55b258\" (UID: \"069c06c6-fe60-41d0-b96d-86606f55b258\") " Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.807717 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.807544 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "069c06c6-fe60-41d0-b96d-86606f55b258" (UID: "069c06c6-fe60-41d0-b96d-86606f55b258"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.807562 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "069c06c6-fe60-41d0-b96d-86606f55b258" (UID: "069c06c6-fe60-41d0-b96d-86606f55b258"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.807583 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "069c06c6-fe60-41d0-b96d-86606f55b258" (UID: "069c06c6-fe60-41d0-b96d-86606f55b258"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.808135 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/069c06c6-fe60-41d0-b96d-86606f55b258-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "069c06c6-fe60-41d0-b96d-86606f55b258" (UID: "069c06c6-fe60-41d0-b96d-86606f55b258"). InnerVolumeSpecName "env-overrides". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.808191 4769 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.808211 4769 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.808225 4769 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-systemd-units\") on node \"crc\" DevicePath \"\"" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.808237 4769 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-host-run-netns\") on node \"crc\" DevicePath \"\"" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.808248 4769 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.808262 4769 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.808275 4769 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.808288 4769 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/069c06c6-fe60-41d0-b96d-86606f55b258-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.808300 4769 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-host-cni-netd\") on node \"crc\" DevicePath \"\"" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.808312 4769 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-node-log\") on node \"crc\" DevicePath \"\"" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.808324 4769 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-host-kubelet\") on node \"crc\" DevicePath \"\"" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.808334 4769 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-host-cni-bin\") on node \"crc\" DevicePath \"\"" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.808345 4769 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-host-slash\") on node 
\"crc\" DevicePath \"\"" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.808356 4769 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-run-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.808366 4769 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-log-socket\") on node \"crc\" DevicePath \"\"" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.810161 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/069c06c6-fe60-41d0-b96d-86606f55b258-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "069c06c6-fe60-41d0-b96d-86606f55b258" (UID: "069c06c6-fe60-41d0-b96d-86606f55b258"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.823636 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/069c06c6-fe60-41d0-b96d-86606f55b258-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "069c06c6-fe60-41d0-b96d-86606f55b258" (UID: "069c06c6-fe60-41d0-b96d-86606f55b258"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.824761 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/069c06c6-fe60-41d0-b96d-86606f55b258-kube-api-access-9wsqq" (OuterVolumeSpecName: "kube-api-access-9wsqq") pod "069c06c6-fe60-41d0-b96d-86606f55b258" (UID: "069c06c6-fe60-41d0-b96d-86606f55b258"). InnerVolumeSpecName "kube-api-access-9wsqq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.838628 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "069c06c6-fe60-41d0-b96d-86606f55b258" (UID: "069c06c6-fe60-41d0-b96d-86606f55b258"). InnerVolumeSpecName "run-systemd". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.909420 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-ovnkube-config\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.909906 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-ovn-node-metrics-cert\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.909950 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-run-systemd\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.909992 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-host-cni-bin\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.910016 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-host-cni-netd\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.910038 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-host-kubelet\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.910059 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-run-ovn\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.910091 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nnmtl\" (UniqueName: \"kubernetes.io/projected/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-kube-api-access-nnmtl\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.910121 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-var-lib-openvswitch\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.910151 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-log-socket\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.910182 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-host-run-ovn-kubernetes\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.910234 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-run-openvswitch\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.910268 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-systemd-units\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.910299 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-host-run-netns\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.910332 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-etc-openvswitch\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.910377 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.910401 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-host-slash\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 
09:54:45.910424 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-ovnkube-script-lib\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.910449 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-node-log\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.910479 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-env-overrides\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.910533 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9wsqq\" (UniqueName: \"kubernetes.io/projected/069c06c6-fe60-41d0-b96d-86606f55b258-kube-api-access-9wsqq\") on node \"crc\" DevicePath \"\"" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.910550 4769 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/069c06c6-fe60-41d0-b96d-86606f55b258-run-systemd\") on node \"crc\" DevicePath \"\"" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.910563 4769 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/069c06c6-fe60-41d0-b96d-86606f55b258-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.910577 4769 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/069c06c6-fe60-41d0-b96d-86606f55b258-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 25 09:54:45 crc kubenswrapper[4769]: I1125 09:54:45.910589 4769 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/069c06c6-fe60-41d0-b96d-86606f55b258-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.011488 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.011567 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-host-slash\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.011600 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: 
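[Editor's note] The entries that follow attach the same volume set to the replacement pod ovnkube-node-5bk62 (UID 0c939ff9-f59e-4aca-b0db-9f130e3ebc54), and every MountVolume.SetUp must succeed before kubelet will create the pod sandbox. A hypothetical Go helper showing that barrier; the function name echoes kubelet's terminology, but the code itself is invented:

    // Illustrative barrier: wait until all expected volumes report a
    // successful SetUp (or time out and delay pod start).
    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    func waitForAttachAndMount(mounted <-chan string, want int, timeout time.Duration) error {
        deadline := time.After(timeout)
        for got := 0; got < want; {
            select {
            case name := <-mounted:
                got++
                fmt.Printf("MountVolume.SetUp succeeded for volume %q (%d/%d)\n", name, got, want)
            case <-deadline:
                return errors.New("unmounted volumes remain; delaying pod start")
            }
        }
        return nil // all volumes ready: safe to create the pod sandbox
    }

    func main() {
        ch := make(chan string, 3)
        for _, v := range []string{"ovnkube-config", "ovn-node-metrics-cert", "kube-api-access-nnmtl"} {
            ch <- v
        }
        if err := waitForAttachAndMount(ch, 3, time.Second); err != nil {
            fmt.Println(err)
        }
    }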
\"kubernetes.io/configmap/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-ovnkube-script-lib\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.011623 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-node-log\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.011651 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-env-overrides\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.011686 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-ovnkube-config\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.011710 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-ovn-node-metrics-cert\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.011735 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-run-systemd\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.011756 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-host-cni-bin\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.011777 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-host-cni-netd\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.011797 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-run-ovn\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.011821 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-host-kubelet\") pod \"ovnkube-node-5bk62\" (UID: 
\"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.011849 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nnmtl\" (UniqueName: \"kubernetes.io/projected/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-kube-api-access-nnmtl\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.011899 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-var-lib-openvswitch\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.011928 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-log-socket\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.011953 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-host-run-ovn-kubernetes\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.012013 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-run-openvswitch\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.012045 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-systemd-units\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.012072 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-host-run-netns\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.012110 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-etc-openvswitch\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.012204 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-etc-openvswitch\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.012257 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-node-log\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.012454 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-host-kubelet\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.011639 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.012712 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-ovnkube-script-lib\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.012772 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-host-slash\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.012817 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-host-run-ovn-kubernetes\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.012928 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-env-overrides\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.013012 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-run-openvswitch\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.013036 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-var-lib-openvswitch\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 
09:54:46.013064 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-host-run-netns\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.013044 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-systemd-units\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.013119 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-host-cni-bin\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.013158 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-run-systemd\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.013190 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-host-cni-netd\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.013220 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-run-ovn\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.013265 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-log-socket\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.013598 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-ovnkube-config\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.033089 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-ovn-node-metrics-cert\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.064796 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nnmtl\" (UniqueName: 
\"kubernetes.io/projected/0c939ff9-f59e-4aca-b0db-9f130e3ebc54-kube-api-access-nnmtl\") pod \"ovnkube-node-5bk62\" (UID: \"0c939ff9-f59e-4aca-b0db-9f130e3ebc54\") " pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.173911 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.257516 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" event={"ID":"0c939ff9-f59e-4aca-b0db-9f130e3ebc54","Type":"ContainerStarted","Data":"db6fcd518c6f090d81509092eb55dd0e0d931be6b9cd0bc90a950a18f510285d"} Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.261827 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-kfvzs_069c06c6-fe60-41d0-b96d-86606f55b258/ovn-acl-logging/0.log" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.262349 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-kfvzs_069c06c6-fe60-41d0-b96d-86606f55b258/ovn-controller/0.log" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.263071 4769 generic.go:334] "Generic (PLEG): container finished" podID="069c06c6-fe60-41d0-b96d-86606f55b258" containerID="a41ce53b7a0ecd75b9205774e514ba01c6852f939cff8e96fa49105f4e600702" exitCode=0 Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.263101 4769 generic.go:334] "Generic (PLEG): container finished" podID="069c06c6-fe60-41d0-b96d-86606f55b258" containerID="82259eae43bbd7b738e4f7fef449b797efc4d557417eb01b61a9ff1e6d59d07b" exitCode=0 Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.263171 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" event={"ID":"069c06c6-fe60-41d0-b96d-86606f55b258","Type":"ContainerDied","Data":"a41ce53b7a0ecd75b9205774e514ba01c6852f939cff8e96fa49105f4e600702"} Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.263211 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" event={"ID":"069c06c6-fe60-41d0-b96d-86606f55b258","Type":"ContainerDied","Data":"82259eae43bbd7b738e4f7fef449b797efc4d557417eb01b61a9ff1e6d59d07b"} Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.263177 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.263225 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-kfvzs" event={"ID":"069c06c6-fe60-41d0-b96d-86606f55b258","Type":"ContainerDied","Data":"d5b0eeb88ebfd4de242398d7b2b6308ff4c161977fd5e72c60ccd1064825a64b"} Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.263280 4769 scope.go:117] "RemoveContainer" containerID="7f68699e162f5cdc55b3885a3e3dd00c7ef78c394d3aff1ad154563d9d00fe98" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.270242 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-s47tv_025219f0-bc69-4a33-acaa-b055607272bb/kube-multus/2.log" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.299057 4769 scope.go:117] "RemoveContainer" containerID="010a6e6fa299b12043210d61b4ac16d7d1f4d948888065c8a9e723060e2be2b6" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.309985 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-kfvzs"] Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.321660 4769 scope.go:117] "RemoveContainer" containerID="0893245e50828fc44a5a9f575584f675dfd48a67e31fb78734b575e30af769ae" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.325460 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-kfvzs"] Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.361174 4769 scope.go:117] "RemoveContainer" containerID="79aa5dc74b62d0d36d744a8603484ce1a5199bba790a406ffa70f76b70592e51" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.375493 4769 scope.go:117] "RemoveContainer" containerID="a41ce53b7a0ecd75b9205774e514ba01c6852f939cff8e96fa49105f4e600702" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.394206 4769 scope.go:117] "RemoveContainer" containerID="82259eae43bbd7b738e4f7fef449b797efc4d557417eb01b61a9ff1e6d59d07b" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.442208 4769 scope.go:117] "RemoveContainer" containerID="7459078ef54cd1bb3171908596cdc66735a96463eafbc5a0ca21e644cd934e1a" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.503255 4769 scope.go:117] "RemoveContainer" containerID="ab25f2ae721ae8616e5ef712256055c8f8a353d898a7f2711938634d2d9fbbe9" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.526855 4769 scope.go:117] "RemoveContainer" containerID="a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.544788 4769 scope.go:117] "RemoveContainer" containerID="7f68699e162f5cdc55b3885a3e3dd00c7ef78c394d3aff1ad154563d9d00fe98" Nov 25 09:54:46 crc kubenswrapper[4769]: E1125 09:54:46.545790 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7f68699e162f5cdc55b3885a3e3dd00c7ef78c394d3aff1ad154563d9d00fe98\": container with ID starting with 7f68699e162f5cdc55b3885a3e3dd00c7ef78c394d3aff1ad154563d9d00fe98 not found: ID does not exist" containerID="7f68699e162f5cdc55b3885a3e3dd00c7ef78c394d3aff1ad154563d9d00fe98" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.545896 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7f68699e162f5cdc55b3885a3e3dd00c7ef78c394d3aff1ad154563d9d00fe98"} err="failed to get container status \"7f68699e162f5cdc55b3885a3e3dd00c7ef78c394d3aff1ad154563d9d00fe98\": rpc error: code = 
NotFound desc = could not find container \"7f68699e162f5cdc55b3885a3e3dd00c7ef78c394d3aff1ad154563d9d00fe98\": container with ID starting with 7f68699e162f5cdc55b3885a3e3dd00c7ef78c394d3aff1ad154563d9d00fe98 not found: ID does not exist" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.545935 4769 scope.go:117] "RemoveContainer" containerID="010a6e6fa299b12043210d61b4ac16d7d1f4d948888065c8a9e723060e2be2b6" Nov 25 09:54:46 crc kubenswrapper[4769]: E1125 09:54:46.546263 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"010a6e6fa299b12043210d61b4ac16d7d1f4d948888065c8a9e723060e2be2b6\": container with ID starting with 010a6e6fa299b12043210d61b4ac16d7d1f4d948888065c8a9e723060e2be2b6 not found: ID does not exist" containerID="010a6e6fa299b12043210d61b4ac16d7d1f4d948888065c8a9e723060e2be2b6" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.546280 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"010a6e6fa299b12043210d61b4ac16d7d1f4d948888065c8a9e723060e2be2b6"} err="failed to get container status \"010a6e6fa299b12043210d61b4ac16d7d1f4d948888065c8a9e723060e2be2b6\": rpc error: code = NotFound desc = could not find container \"010a6e6fa299b12043210d61b4ac16d7d1f4d948888065c8a9e723060e2be2b6\": container with ID starting with 010a6e6fa299b12043210d61b4ac16d7d1f4d948888065c8a9e723060e2be2b6 not found: ID does not exist" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.546291 4769 scope.go:117] "RemoveContainer" containerID="0893245e50828fc44a5a9f575584f675dfd48a67e31fb78734b575e30af769ae" Nov 25 09:54:46 crc kubenswrapper[4769]: E1125 09:54:46.546529 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0893245e50828fc44a5a9f575584f675dfd48a67e31fb78734b575e30af769ae\": container with ID starting with 0893245e50828fc44a5a9f575584f675dfd48a67e31fb78734b575e30af769ae not found: ID does not exist" containerID="0893245e50828fc44a5a9f575584f675dfd48a67e31fb78734b575e30af769ae" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.546551 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0893245e50828fc44a5a9f575584f675dfd48a67e31fb78734b575e30af769ae"} err="failed to get container status \"0893245e50828fc44a5a9f575584f675dfd48a67e31fb78734b575e30af769ae\": rpc error: code = NotFound desc = could not find container \"0893245e50828fc44a5a9f575584f675dfd48a67e31fb78734b575e30af769ae\": container with ID starting with 0893245e50828fc44a5a9f575584f675dfd48a67e31fb78734b575e30af769ae not found: ID does not exist" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.546565 4769 scope.go:117] "RemoveContainer" containerID="79aa5dc74b62d0d36d744a8603484ce1a5199bba790a406ffa70f76b70592e51" Nov 25 09:54:46 crc kubenswrapper[4769]: E1125 09:54:46.546733 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"79aa5dc74b62d0d36d744a8603484ce1a5199bba790a406ffa70f76b70592e51\": container with ID starting with 79aa5dc74b62d0d36d744a8603484ce1a5199bba790a406ffa70f76b70592e51 not found: ID does not exist" containerID="79aa5dc74b62d0d36d744a8603484ce1a5199bba790a406ffa70f76b70592e51" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.546747 4769 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"79aa5dc74b62d0d36d744a8603484ce1a5199bba790a406ffa70f76b70592e51"} err="failed to get container status \"79aa5dc74b62d0d36d744a8603484ce1a5199bba790a406ffa70f76b70592e51\": rpc error: code = NotFound desc = could not find container \"79aa5dc74b62d0d36d744a8603484ce1a5199bba790a406ffa70f76b70592e51\": container with ID starting with 79aa5dc74b62d0d36d744a8603484ce1a5199bba790a406ffa70f76b70592e51 not found: ID does not exist" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.546760 4769 scope.go:117] "RemoveContainer" containerID="a41ce53b7a0ecd75b9205774e514ba01c6852f939cff8e96fa49105f4e600702" Nov 25 09:54:46 crc kubenswrapper[4769]: E1125 09:54:46.547016 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a41ce53b7a0ecd75b9205774e514ba01c6852f939cff8e96fa49105f4e600702\": container with ID starting with a41ce53b7a0ecd75b9205774e514ba01c6852f939cff8e96fa49105f4e600702 not found: ID does not exist" containerID="a41ce53b7a0ecd75b9205774e514ba01c6852f939cff8e96fa49105f4e600702" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.547034 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a41ce53b7a0ecd75b9205774e514ba01c6852f939cff8e96fa49105f4e600702"} err="failed to get container status \"a41ce53b7a0ecd75b9205774e514ba01c6852f939cff8e96fa49105f4e600702\": rpc error: code = NotFound desc = could not find container \"a41ce53b7a0ecd75b9205774e514ba01c6852f939cff8e96fa49105f4e600702\": container with ID starting with a41ce53b7a0ecd75b9205774e514ba01c6852f939cff8e96fa49105f4e600702 not found: ID does not exist" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.547045 4769 scope.go:117] "RemoveContainer" containerID="82259eae43bbd7b738e4f7fef449b797efc4d557417eb01b61a9ff1e6d59d07b" Nov 25 09:54:46 crc kubenswrapper[4769]: E1125 09:54:46.547326 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"82259eae43bbd7b738e4f7fef449b797efc4d557417eb01b61a9ff1e6d59d07b\": container with ID starting with 82259eae43bbd7b738e4f7fef449b797efc4d557417eb01b61a9ff1e6d59d07b not found: ID does not exist" containerID="82259eae43bbd7b738e4f7fef449b797efc4d557417eb01b61a9ff1e6d59d07b" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.547386 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"82259eae43bbd7b738e4f7fef449b797efc4d557417eb01b61a9ff1e6d59d07b"} err="failed to get container status \"82259eae43bbd7b738e4f7fef449b797efc4d557417eb01b61a9ff1e6d59d07b\": rpc error: code = NotFound desc = could not find container \"82259eae43bbd7b738e4f7fef449b797efc4d557417eb01b61a9ff1e6d59d07b\": container with ID starting with 82259eae43bbd7b738e4f7fef449b797efc4d557417eb01b61a9ff1e6d59d07b not found: ID does not exist" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.547432 4769 scope.go:117] "RemoveContainer" containerID="7459078ef54cd1bb3171908596cdc66735a96463eafbc5a0ca21e644cd934e1a" Nov 25 09:54:46 crc kubenswrapper[4769]: E1125 09:54:46.547814 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7459078ef54cd1bb3171908596cdc66735a96463eafbc5a0ca21e644cd934e1a\": container with ID starting with 7459078ef54cd1bb3171908596cdc66735a96463eafbc5a0ca21e644cd934e1a not found: ID does not exist" 
containerID="7459078ef54cd1bb3171908596cdc66735a96463eafbc5a0ca21e644cd934e1a" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.547840 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7459078ef54cd1bb3171908596cdc66735a96463eafbc5a0ca21e644cd934e1a"} err="failed to get container status \"7459078ef54cd1bb3171908596cdc66735a96463eafbc5a0ca21e644cd934e1a\": rpc error: code = NotFound desc = could not find container \"7459078ef54cd1bb3171908596cdc66735a96463eafbc5a0ca21e644cd934e1a\": container with ID starting with 7459078ef54cd1bb3171908596cdc66735a96463eafbc5a0ca21e644cd934e1a not found: ID does not exist" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.547854 4769 scope.go:117] "RemoveContainer" containerID="ab25f2ae721ae8616e5ef712256055c8f8a353d898a7f2711938634d2d9fbbe9" Nov 25 09:54:46 crc kubenswrapper[4769]: E1125 09:54:46.548085 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ab25f2ae721ae8616e5ef712256055c8f8a353d898a7f2711938634d2d9fbbe9\": container with ID starting with ab25f2ae721ae8616e5ef712256055c8f8a353d898a7f2711938634d2d9fbbe9 not found: ID does not exist" containerID="ab25f2ae721ae8616e5ef712256055c8f8a353d898a7f2711938634d2d9fbbe9" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.548101 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ab25f2ae721ae8616e5ef712256055c8f8a353d898a7f2711938634d2d9fbbe9"} err="failed to get container status \"ab25f2ae721ae8616e5ef712256055c8f8a353d898a7f2711938634d2d9fbbe9\": rpc error: code = NotFound desc = could not find container \"ab25f2ae721ae8616e5ef712256055c8f8a353d898a7f2711938634d2d9fbbe9\": container with ID starting with ab25f2ae721ae8616e5ef712256055c8f8a353d898a7f2711938634d2d9fbbe9 not found: ID does not exist" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.548113 4769 scope.go:117] "RemoveContainer" containerID="a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065" Nov 25 09:54:46 crc kubenswrapper[4769]: E1125 09:54:46.548367 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\": container with ID starting with a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065 not found: ID does not exist" containerID="a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.548385 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065"} err="failed to get container status \"a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\": rpc error: code = NotFound desc = could not find container \"a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\": container with ID starting with a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065 not found: ID does not exist" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.548398 4769 scope.go:117] "RemoveContainer" containerID="7f68699e162f5cdc55b3885a3e3dd00c7ef78c394d3aff1ad154563d9d00fe98" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.548608 4769 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"7f68699e162f5cdc55b3885a3e3dd00c7ef78c394d3aff1ad154563d9d00fe98"} err="failed to get container status \"7f68699e162f5cdc55b3885a3e3dd00c7ef78c394d3aff1ad154563d9d00fe98\": rpc error: code = NotFound desc = could not find container \"7f68699e162f5cdc55b3885a3e3dd00c7ef78c394d3aff1ad154563d9d00fe98\": container with ID starting with 7f68699e162f5cdc55b3885a3e3dd00c7ef78c394d3aff1ad154563d9d00fe98 not found: ID does not exist" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.548620 4769 scope.go:117] "RemoveContainer" containerID="010a6e6fa299b12043210d61b4ac16d7d1f4d948888065c8a9e723060e2be2b6" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.548780 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"010a6e6fa299b12043210d61b4ac16d7d1f4d948888065c8a9e723060e2be2b6"} err="failed to get container status \"010a6e6fa299b12043210d61b4ac16d7d1f4d948888065c8a9e723060e2be2b6\": rpc error: code = NotFound desc = could not find container \"010a6e6fa299b12043210d61b4ac16d7d1f4d948888065c8a9e723060e2be2b6\": container with ID starting with 010a6e6fa299b12043210d61b4ac16d7d1f4d948888065c8a9e723060e2be2b6 not found: ID does not exist" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.548791 4769 scope.go:117] "RemoveContainer" containerID="0893245e50828fc44a5a9f575584f675dfd48a67e31fb78734b575e30af769ae" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.549056 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0893245e50828fc44a5a9f575584f675dfd48a67e31fb78734b575e30af769ae"} err="failed to get container status \"0893245e50828fc44a5a9f575584f675dfd48a67e31fb78734b575e30af769ae\": rpc error: code = NotFound desc = could not find container \"0893245e50828fc44a5a9f575584f675dfd48a67e31fb78734b575e30af769ae\": container with ID starting with 0893245e50828fc44a5a9f575584f675dfd48a67e31fb78734b575e30af769ae not found: ID does not exist" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.549070 4769 scope.go:117] "RemoveContainer" containerID="79aa5dc74b62d0d36d744a8603484ce1a5199bba790a406ffa70f76b70592e51" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.549294 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"79aa5dc74b62d0d36d744a8603484ce1a5199bba790a406ffa70f76b70592e51"} err="failed to get container status \"79aa5dc74b62d0d36d744a8603484ce1a5199bba790a406ffa70f76b70592e51\": rpc error: code = NotFound desc = could not find container \"79aa5dc74b62d0d36d744a8603484ce1a5199bba790a406ffa70f76b70592e51\": container with ID starting with 79aa5dc74b62d0d36d744a8603484ce1a5199bba790a406ffa70f76b70592e51 not found: ID does not exist" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.549309 4769 scope.go:117] "RemoveContainer" containerID="a41ce53b7a0ecd75b9205774e514ba01c6852f939cff8e96fa49105f4e600702" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.549542 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a41ce53b7a0ecd75b9205774e514ba01c6852f939cff8e96fa49105f4e600702"} err="failed to get container status \"a41ce53b7a0ecd75b9205774e514ba01c6852f939cff8e96fa49105f4e600702\": rpc error: code = NotFound desc = could not find container \"a41ce53b7a0ecd75b9205774e514ba01c6852f939cff8e96fa49105f4e600702\": container with ID starting with a41ce53b7a0ecd75b9205774e514ba01c6852f939cff8e96fa49105f4e600702 not found: ID does not exist" Nov 
25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.549555 4769 scope.go:117] "RemoveContainer" containerID="82259eae43bbd7b738e4f7fef449b797efc4d557417eb01b61a9ff1e6d59d07b" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.549730 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"82259eae43bbd7b738e4f7fef449b797efc4d557417eb01b61a9ff1e6d59d07b"} err="failed to get container status \"82259eae43bbd7b738e4f7fef449b797efc4d557417eb01b61a9ff1e6d59d07b\": rpc error: code = NotFound desc = could not find container \"82259eae43bbd7b738e4f7fef449b797efc4d557417eb01b61a9ff1e6d59d07b\": container with ID starting with 82259eae43bbd7b738e4f7fef449b797efc4d557417eb01b61a9ff1e6d59d07b not found: ID does not exist" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.549744 4769 scope.go:117] "RemoveContainer" containerID="7459078ef54cd1bb3171908596cdc66735a96463eafbc5a0ca21e644cd934e1a" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.549888 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7459078ef54cd1bb3171908596cdc66735a96463eafbc5a0ca21e644cd934e1a"} err="failed to get container status \"7459078ef54cd1bb3171908596cdc66735a96463eafbc5a0ca21e644cd934e1a\": rpc error: code = NotFound desc = could not find container \"7459078ef54cd1bb3171908596cdc66735a96463eafbc5a0ca21e644cd934e1a\": container with ID starting with 7459078ef54cd1bb3171908596cdc66735a96463eafbc5a0ca21e644cd934e1a not found: ID does not exist" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.549901 4769 scope.go:117] "RemoveContainer" containerID="ab25f2ae721ae8616e5ef712256055c8f8a353d898a7f2711938634d2d9fbbe9" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.550196 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ab25f2ae721ae8616e5ef712256055c8f8a353d898a7f2711938634d2d9fbbe9"} err="failed to get container status \"ab25f2ae721ae8616e5ef712256055c8f8a353d898a7f2711938634d2d9fbbe9\": rpc error: code = NotFound desc = could not find container \"ab25f2ae721ae8616e5ef712256055c8f8a353d898a7f2711938634d2d9fbbe9\": container with ID starting with ab25f2ae721ae8616e5ef712256055c8f8a353d898a7f2711938634d2d9fbbe9 not found: ID does not exist" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.550216 4769 scope.go:117] "RemoveContainer" containerID="a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065" Nov 25 09:54:46 crc kubenswrapper[4769]: I1125 09:54:46.550749 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065"} err="failed to get container status \"a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\": rpc error: code = NotFound desc = could not find container \"a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065\": container with ID starting with a5cfb4298bbef8e695e84c4432a4553a358f5e75564953dc8e00d47a4670a065 not found: ID does not exist" Nov 25 09:54:47 crc kubenswrapper[4769]: I1125 09:54:47.279607 4769 generic.go:334] "Generic (PLEG): container finished" podID="0c939ff9-f59e-4aca-b0db-9f130e3ebc54" containerID="eb305bf97e6fd775b8142b3a8fb2e6a0494dbd7d63b7f1b98a336484ae0d3792" exitCode=0 Nov 25 09:54:47 crc kubenswrapper[4769]: I1125 09:54:47.280008 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" 
event={"ID":"0c939ff9-f59e-4aca-b0db-9f130e3ebc54","Type":"ContainerDied","Data":"eb305bf97e6fd775b8142b3a8fb2e6a0494dbd7d63b7f1b98a336484ae0d3792"} Nov 25 09:54:48 crc kubenswrapper[4769]: I1125 09:54:48.247436 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="069c06c6-fe60-41d0-b96d-86606f55b258" path="/var/lib/kubelet/pods/069c06c6-fe60-41d0-b96d-86606f55b258/volumes" Nov 25 09:54:48 crc kubenswrapper[4769]: I1125 09:54:48.291520 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" event={"ID":"0c939ff9-f59e-4aca-b0db-9f130e3ebc54","Type":"ContainerStarted","Data":"dc5688256501434a0d1de263ca4b674709e6da3c2927a8adb51b9eec318df65d"} Nov 25 09:54:48 crc kubenswrapper[4769]: I1125 09:54:48.291565 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" event={"ID":"0c939ff9-f59e-4aca-b0db-9f130e3ebc54","Type":"ContainerStarted","Data":"fbe7fbabf4d05f8711ade8e78e9b71e762270c93956c6de108ba50f910d9fa52"} Nov 25 09:54:48 crc kubenswrapper[4769]: I1125 09:54:48.291576 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" event={"ID":"0c939ff9-f59e-4aca-b0db-9f130e3ebc54","Type":"ContainerStarted","Data":"828ed9f64f87acce2136f5ca184764e798091e5722cdf26a5d6e8db8d9b9b5b8"} Nov 25 09:54:48 crc kubenswrapper[4769]: I1125 09:54:48.291588 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" event={"ID":"0c939ff9-f59e-4aca-b0db-9f130e3ebc54","Type":"ContainerStarted","Data":"95f0be5dda5052b06ef48baf79559dc9fa47d0522a001d356be0f8f553744bfb"} Nov 25 09:54:50 crc kubenswrapper[4769]: I1125 09:54:50.313926 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" event={"ID":"0c939ff9-f59e-4aca-b0db-9f130e3ebc54","Type":"ContainerStarted","Data":"999d9fd95d1123730e690bf2cf8fc6066080aad59caa7b0350a57da75d8ca058"} Nov 25 09:54:50 crc kubenswrapper[4769]: I1125 09:54:50.314872 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" event={"ID":"0c939ff9-f59e-4aca-b0db-9f130e3ebc54","Type":"ContainerStarted","Data":"5842e296b1cd25ba29b0f8dd7ba7d04044874eee3a42bc65b4496457087f5e7c"} Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.081840 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-zsv66"] Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.083542 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-zsv66" Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.098379 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt" Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.099061 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt" Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.099083 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-bl6xm" Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.193219 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p5blt\" (UniqueName: \"kubernetes.io/projected/0c1565cf-9a88-4b55-a8b0-3ad109f1ca33-kube-api-access-p5blt\") pod \"obo-prometheus-operator-668cf9dfbb-zsv66\" (UID: \"0c1565cf-9a88-4b55-a8b0-3ad109f1ca33\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-zsv66" Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.210491 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc"] Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.211370 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc" Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.217066 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-7qxns" Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.224273 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert" Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.230325 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226"] Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.231344 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226" Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.295031 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/35d3c6d2-4aea-4dbf-9e0f-6fc9fe4933ff-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc\" (UID: \"35d3c6d2-4aea-4dbf-9e0f-6fc9fe4933ff\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc" Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.295115 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f5366bf6-8a99-4762-86f8-e0d0b94c19ed-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226\" (UID: \"f5366bf6-8a99-4762-86f8-e0d0b94c19ed\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226" Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.295362 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p5blt\" (UniqueName: \"kubernetes.io/projected/0c1565cf-9a88-4b55-a8b0-3ad109f1ca33-kube-api-access-p5blt\") pod \"obo-prometheus-operator-668cf9dfbb-zsv66\" (UID: \"0c1565cf-9a88-4b55-a8b0-3ad109f1ca33\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-zsv66" Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.295537 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f5366bf6-8a99-4762-86f8-e0d0b94c19ed-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226\" (UID: \"f5366bf6-8a99-4762-86f8-e0d0b94c19ed\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226" Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.295570 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/35d3c6d2-4aea-4dbf-9e0f-6fc9fe4933ff-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc\" (UID: \"35d3c6d2-4aea-4dbf-9e0f-6fc9fe4933ff\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc" Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.322951 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p5blt\" (UniqueName: \"kubernetes.io/projected/0c1565cf-9a88-4b55-a8b0-3ad109f1ca33-kube-api-access-p5blt\") pod \"obo-prometheus-operator-668cf9dfbb-zsv66\" (UID: \"0c1565cf-9a88-4b55-a8b0-3ad109f1ca33\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-zsv66" Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.352597 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-l8nzd"] Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.353409 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-l8nzd" Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.357681 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-gktzn" Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.362151 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls" Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.397333 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/35d3c6d2-4aea-4dbf-9e0f-6fc9fe4933ff-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc\" (UID: \"35d3c6d2-4aea-4dbf-9e0f-6fc9fe4933ff\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc" Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.397392 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mhh59\" (UniqueName: \"kubernetes.io/projected/75c93e2f-793a-49b1-bd51-73adf5f2edaf-kube-api-access-mhh59\") pod \"observability-operator-d8bb48f5d-l8nzd\" (UID: \"75c93e2f-793a-49b1-bd51-73adf5f2edaf\") " pod="openshift-operators/observability-operator-d8bb48f5d-l8nzd" Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.397420 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/75c93e2f-793a-49b1-bd51-73adf5f2edaf-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-l8nzd\" (UID: \"75c93e2f-793a-49b1-bd51-73adf5f2edaf\") " pod="openshift-operators/observability-operator-d8bb48f5d-l8nzd" Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.397502 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f5366bf6-8a99-4762-86f8-e0d0b94c19ed-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226\" (UID: \"f5366bf6-8a99-4762-86f8-e0d0b94c19ed\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226" Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.397704 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f5366bf6-8a99-4762-86f8-e0d0b94c19ed-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226\" (UID: \"f5366bf6-8a99-4762-86f8-e0d0b94c19ed\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226" Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.397743 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/35d3c6d2-4aea-4dbf-9e0f-6fc9fe4933ff-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc\" (UID: \"35d3c6d2-4aea-4dbf-9e0f-6fc9fe4933ff\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc" Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.401246 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/35d3c6d2-4aea-4dbf-9e0f-6fc9fe4933ff-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc\" (UID: \"35d3c6d2-4aea-4dbf-9e0f-6fc9fe4933ff\") 
" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc" Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.401402 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/35d3c6d2-4aea-4dbf-9e0f-6fc9fe4933ff-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc\" (UID: \"35d3c6d2-4aea-4dbf-9e0f-6fc9fe4933ff\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc" Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.401702 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f5366bf6-8a99-4762-86f8-e0d0b94c19ed-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226\" (UID: \"f5366bf6-8a99-4762-86f8-e0d0b94c19ed\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226" Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.403676 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-zsv66" Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.413064 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f5366bf6-8a99-4762-86f8-e0d0b94c19ed-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226\" (UID: \"f5366bf6-8a99-4762-86f8-e0d0b94c19ed\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226" Nov 25 09:54:51 crc kubenswrapper[4769]: E1125 09:54:51.441747 4769 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-zsv66_openshift-operators_0c1565cf-9a88-4b55-a8b0-3ad109f1ca33_0(98935245cf770b3583a7509cce30a5eb2fd9111bcec1867a81ab200cc8acdc78): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 25 09:54:51 crc kubenswrapper[4769]: E1125 09:54:51.441854 4769 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-zsv66_openshift-operators_0c1565cf-9a88-4b55-a8b0-3ad109f1ca33_0(98935245cf770b3583a7509cce30a5eb2fd9111bcec1867a81ab200cc8acdc78): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-zsv66" Nov 25 09:54:51 crc kubenswrapper[4769]: E1125 09:54:51.441889 4769 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-zsv66_openshift-operators_0c1565cf-9a88-4b55-a8b0-3ad109f1ca33_0(98935245cf770b3583a7509cce30a5eb2fd9111bcec1867a81ab200cc8acdc78): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-zsv66" Nov 25 09:54:51 crc kubenswrapper[4769]: E1125 09:54:51.441951 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-668cf9dfbb-zsv66_openshift-operators(0c1565cf-9a88-4b55-a8b0-3ad109f1ca33)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-668cf9dfbb-zsv66_openshift-operators(0c1565cf-9a88-4b55-a8b0-3ad109f1ca33)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-zsv66_openshift-operators_0c1565cf-9a88-4b55-a8b0-3ad109f1ca33_0(98935245cf770b3583a7509cce30a5eb2fd9111bcec1867a81ab200cc8acdc78): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-zsv66" podUID="0c1565cf-9a88-4b55-a8b0-3ad109f1ca33" Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.499818 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mhh59\" (UniqueName: \"kubernetes.io/projected/75c93e2f-793a-49b1-bd51-73adf5f2edaf-kube-api-access-mhh59\") pod \"observability-operator-d8bb48f5d-l8nzd\" (UID: \"75c93e2f-793a-49b1-bd51-73adf5f2edaf\") " pod="openshift-operators/observability-operator-d8bb48f5d-l8nzd" Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.499874 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/75c93e2f-793a-49b1-bd51-73adf5f2edaf-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-l8nzd\" (UID: \"75c93e2f-793a-49b1-bd51-73adf5f2edaf\") " pod="openshift-operators/observability-operator-d8bb48f5d-l8nzd" Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.508808 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/75c93e2f-793a-49b1-bd51-73adf5f2edaf-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-l8nzd\" (UID: \"75c93e2f-793a-49b1-bd51-73adf5f2edaf\") " pod="openshift-operators/observability-operator-d8bb48f5d-l8nzd" Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.520519 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mhh59\" (UniqueName: \"kubernetes.io/projected/75c93e2f-793a-49b1-bd51-73adf5f2edaf-kube-api-access-mhh59\") pod \"observability-operator-d8bb48f5d-l8nzd\" (UID: \"75c93e2f-793a-49b1-bd51-73adf5f2edaf\") " pod="openshift-operators/observability-operator-d8bb48f5d-l8nzd" Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.527185 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc" Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.538213 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/perses-operator-5446b9c989-qfmdd"] Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.539045 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-qfmdd" Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.542187 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-cdf4f" Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.548116 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226" Nov 25 09:54:51 crc kubenswrapper[4769]: E1125 09:54:51.568909 4769 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc_openshift-operators_35d3c6d2-4aea-4dbf-9e0f-6fc9fe4933ff_0(55d542fd13856d5a12296ced974e454e81e3448ce1e235d743371fa4f45aa6f1): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 25 09:54:51 crc kubenswrapper[4769]: E1125 09:54:51.569363 4769 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc_openshift-operators_35d3c6d2-4aea-4dbf-9e0f-6fc9fe4933ff_0(55d542fd13856d5a12296ced974e454e81e3448ce1e235d743371fa4f45aa6f1): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc" Nov 25 09:54:51 crc kubenswrapper[4769]: E1125 09:54:51.569388 4769 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc_openshift-operators_35d3c6d2-4aea-4dbf-9e0f-6fc9fe4933ff_0(55d542fd13856d5a12296ced974e454e81e3448ce1e235d743371fa4f45aa6f1): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc" Nov 25 09:54:51 crc kubenswrapper[4769]: E1125 09:54:51.569450 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc_openshift-operators(35d3c6d2-4aea-4dbf-9e0f-6fc9fe4933ff)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc_openshift-operators(35d3c6d2-4aea-4dbf-9e0f-6fc9fe4933ff)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc_openshift-operators_35d3c6d2-4aea-4dbf-9e0f-6fc9fe4933ff_0(55d542fd13856d5a12296ced974e454e81e3448ce1e235d743371fa4f45aa6f1): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc" podUID="35d3c6d2-4aea-4dbf-9e0f-6fc9fe4933ff" Nov 25 09:54:51 crc kubenswrapper[4769]: E1125 09:54:51.586415 4769 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226_openshift-operators_f5366bf6-8a99-4762-86f8-e0d0b94c19ed_0(6400bd74f73992348f3f5fee7c1dfd158865fe27d667a02d184de5589efb7674): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Nov 25 09:54:51 crc kubenswrapper[4769]: E1125 09:54:51.586499 4769 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226_openshift-operators_f5366bf6-8a99-4762-86f8-e0d0b94c19ed_0(6400bd74f73992348f3f5fee7c1dfd158865fe27d667a02d184de5589efb7674): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226" Nov 25 09:54:51 crc kubenswrapper[4769]: E1125 09:54:51.586522 4769 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226_openshift-operators_f5366bf6-8a99-4762-86f8-e0d0b94c19ed_0(6400bd74f73992348f3f5fee7c1dfd158865fe27d667a02d184de5589efb7674): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226" Nov 25 09:54:51 crc kubenswrapper[4769]: E1125 09:54:51.586580 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226_openshift-operators(f5366bf6-8a99-4762-86f8-e0d0b94c19ed)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226_openshift-operators(f5366bf6-8a99-4762-86f8-e0d0b94c19ed)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226_openshift-operators_f5366bf6-8a99-4762-86f8-e0d0b94c19ed_0(6400bd74f73992348f3f5fee7c1dfd158865fe27d667a02d184de5589efb7674): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226" podUID="f5366bf6-8a99-4762-86f8-e0d0b94c19ed" Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.601723 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fj8nq\" (UniqueName: \"kubernetes.io/projected/9b8ab5a0-3150-4f13-a8a1-4f8859eb1195-kube-api-access-fj8nq\") pod \"perses-operator-5446b9c989-qfmdd\" (UID: \"9b8ab5a0-3150-4f13-a8a1-4f8859eb1195\") " pod="openshift-operators/perses-operator-5446b9c989-qfmdd" Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.601795 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/9b8ab5a0-3150-4f13-a8a1-4f8859eb1195-openshift-service-ca\") pod \"perses-operator-5446b9c989-qfmdd\" (UID: \"9b8ab5a0-3150-4f13-a8a1-4f8859eb1195\") " pod="openshift-operators/perses-operator-5446b9c989-qfmdd" Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.670823 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-l8nzd"
Nov 25 09:54:51 crc kubenswrapper[4769]: E1125 09:54:51.702236 4769 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-l8nzd_openshift-operators_75c93e2f-793a-49b1-bd51-73adf5f2edaf_0(f9387bbd9cdaf2d2ddb70b88e1897c1c838d810743b6d19f443e4c244905e54d): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 25 09:54:51 crc kubenswrapper[4769]: E1125 09:54:51.702331 4769 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-l8nzd_openshift-operators_75c93e2f-793a-49b1-bd51-73adf5f2edaf_0(f9387bbd9cdaf2d2ddb70b88e1897c1c838d810743b6d19f443e4c244905e54d): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-d8bb48f5d-l8nzd"
Nov 25 09:54:51 crc kubenswrapper[4769]: E1125 09:54:51.702362 4769 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-l8nzd_openshift-operators_75c93e2f-793a-49b1-bd51-73adf5f2edaf_0(f9387bbd9cdaf2d2ddb70b88e1897c1c838d810743b6d19f443e4c244905e54d): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-d8bb48f5d-l8nzd"
Nov 25 09:54:51 crc kubenswrapper[4769]: E1125 09:54:51.702428 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"observability-operator-d8bb48f5d-l8nzd_openshift-operators(75c93e2f-793a-49b1-bd51-73adf5f2edaf)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"observability-operator-d8bb48f5d-l8nzd_openshift-operators(75c93e2f-793a-49b1-bd51-73adf5f2edaf)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-l8nzd_openshift-operators_75c93e2f-793a-49b1-bd51-73adf5f2edaf_0(f9387bbd9cdaf2d2ddb70b88e1897c1c838d810743b6d19f443e4c244905e54d): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/observability-operator-d8bb48f5d-l8nzd" podUID="75c93e2f-793a-49b1-bd51-73adf5f2edaf"
Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.703145 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/9b8ab5a0-3150-4f13-a8a1-4f8859eb1195-openshift-service-ca\") pod \"perses-operator-5446b9c989-qfmdd\" (UID: \"9b8ab5a0-3150-4f13-a8a1-4f8859eb1195\") " pod="openshift-operators/perses-operator-5446b9c989-qfmdd"
Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.703315 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fj8nq\" (UniqueName: \"kubernetes.io/projected/9b8ab5a0-3150-4f13-a8a1-4f8859eb1195-kube-api-access-fj8nq\") pod \"perses-operator-5446b9c989-qfmdd\" (UID: \"9b8ab5a0-3150-4f13-a8a1-4f8859eb1195\") " pod="openshift-operators/perses-operator-5446b9c989-qfmdd"
Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.704330 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/9b8ab5a0-3150-4f13-a8a1-4f8859eb1195-openshift-service-ca\") pod \"perses-operator-5446b9c989-qfmdd\" (UID: \"9b8ab5a0-3150-4f13-a8a1-4f8859eb1195\") " pod="openshift-operators/perses-operator-5446b9c989-qfmdd"
Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.727903 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fj8nq\" (UniqueName: \"kubernetes.io/projected/9b8ab5a0-3150-4f13-a8a1-4f8859eb1195-kube-api-access-fj8nq\") pod \"perses-operator-5446b9c989-qfmdd\" (UID: \"9b8ab5a0-3150-4f13-a8a1-4f8859eb1195\") " pod="openshift-operators/perses-operator-5446b9c989-qfmdd"
Nov 25 09:54:51 crc kubenswrapper[4769]: I1125 09:54:51.857684 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-qfmdd"
Nov 25 09:54:51 crc kubenswrapper[4769]: E1125 09:54:51.887533 4769 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-qfmdd_openshift-operators_9b8ab5a0-3150-4f13-a8a1-4f8859eb1195_0(9bfee433b2ad5c23b0f6e60f89e2897bb17b2c21926eff13ad2587f0e0301e12): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 25 09:54:51 crc kubenswrapper[4769]: E1125 09:54:51.888085 4769 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-qfmdd_openshift-operators_9b8ab5a0-3150-4f13-a8a1-4f8859eb1195_0(9bfee433b2ad5c23b0f6e60f89e2897bb17b2c21926eff13ad2587f0e0301e12): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-qfmdd"
Nov 25 09:54:51 crc kubenswrapper[4769]: E1125 09:54:51.888124 4769 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-qfmdd_openshift-operators_9b8ab5a0-3150-4f13-a8a1-4f8859eb1195_0(9bfee433b2ad5c23b0f6e60f89e2897bb17b2c21926eff13ad2587f0e0301e12): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-qfmdd"
Nov 25 09:54:51 crc kubenswrapper[4769]: E1125 09:54:51.888188 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"perses-operator-5446b9c989-qfmdd_openshift-operators(9b8ab5a0-3150-4f13-a8a1-4f8859eb1195)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"perses-operator-5446b9c989-qfmdd_openshift-operators(9b8ab5a0-3150-4f13-a8a1-4f8859eb1195)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-qfmdd_openshift-operators_9b8ab5a0-3150-4f13-a8a1-4f8859eb1195_0(9bfee433b2ad5c23b0f6e60f89e2897bb17b2c21926eff13ad2587f0e0301e12): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/perses-operator-5446b9c989-qfmdd" podUID="9b8ab5a0-3150-4f13-a8a1-4f8859eb1195"
Nov 25 09:54:52 crc kubenswrapper[4769]: I1125 09:54:52.334474 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" event={"ID":"0c939ff9-f59e-4aca-b0db-9f130e3ebc54","Type":"ContainerStarted","Data":"4de2ac4b89ae1091ed4d63c73beabef7e837cc0aea7e8942f744901b1fd04a44"}
Nov 25 09:54:54 crc kubenswrapper[4769]: I1125 09:54:54.359065 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" event={"ID":"0c939ff9-f59e-4aca-b0db-9f130e3ebc54","Type":"ContainerStarted","Data":"c6ab931f826bd95dc10d45cc335dd0b66bdeca59fd8f8067a747c68b9c5ecd33"}
Nov 25 09:54:54 crc kubenswrapper[4769]: I1125 09:54:54.363117 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-5bk62"
Nov 25 09:54:54 crc kubenswrapper[4769]: I1125 09:54:54.363353 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-5bk62"
Nov 25 09:54:54 crc kubenswrapper[4769]: I1125 09:54:54.363558 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-5bk62"
Nov 25 09:54:54 crc kubenswrapper[4769]: I1125 09:54:54.419832 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-5bk62" podStartSLOduration=9.419810826 podStartE2EDuration="9.419810826s" podCreationTimestamp="2025-11-25 09:54:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:54:54.411768494 +0000 UTC m=+642.996740817" watchObservedRunningTime="2025-11-25 09:54:54.419810826 +0000 UTC m=+643.004783139"
Nov 25 09:54:54 crc kubenswrapper[4769]: I1125 09:54:54.434154 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-5bk62"
Nov 25 09:54:54 crc kubenswrapper[4769]: I1125 09:54:54.446390 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-5bk62"
Nov 25 09:54:55 crc kubenswrapper[4769]: I1125 09:54:55.388705 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc"]
Nov 25 09:54:55 crc kubenswrapper[4769]: I1125 09:54:55.389327 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc"
Nov 25 09:54:55 crc kubenswrapper[4769]: I1125 09:54:55.389900 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc"
Nov 25 09:54:55 crc kubenswrapper[4769]: I1125 09:54:55.393020 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-qfmdd"]
Nov 25 09:54:55 crc kubenswrapper[4769]: I1125 09:54:55.393148 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-qfmdd"
Nov 25 09:54:55 crc kubenswrapper[4769]: I1125 09:54:55.393595 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-qfmdd"
Nov 25 09:54:55 crc kubenswrapper[4769]: I1125 09:54:55.396578 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-zsv66"]
Nov 25 09:54:55 crc kubenswrapper[4769]: I1125 09:54:55.396735 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-zsv66"
Nov 25 09:54:55 crc kubenswrapper[4769]: I1125 09:54:55.397414 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-zsv66"
Nov 25 09:54:55 crc kubenswrapper[4769]: I1125 09:54:55.411095 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226"]
Nov 25 09:54:55 crc kubenswrapper[4769]: I1125 09:54:55.411276 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226"
Nov 25 09:54:55 crc kubenswrapper[4769]: I1125 09:54:55.411883 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226"
Nov 25 09:54:55 crc kubenswrapper[4769]: I1125 09:54:55.416042 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-l8nzd"]
Nov 25 09:54:55 crc kubenswrapper[4769]: I1125 09:54:55.417094 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-l8nzd"
Nov 25 09:54:55 crc kubenswrapper[4769]: I1125 09:54:55.417694 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-l8nzd"
Nov 25 09:54:55 crc kubenswrapper[4769]: E1125 09:54:55.489464 4769 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-zsv66_openshift-operators_0c1565cf-9a88-4b55-a8b0-3ad109f1ca33_0(90674b66b574ef3e408b758c4cee0ae3604980cdb9ef571453977e7a1f295321): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 25 09:54:55 crc kubenswrapper[4769]: E1125 09:54:55.489558 4769 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-zsv66_openshift-operators_0c1565cf-9a88-4b55-a8b0-3ad109f1ca33_0(90674b66b574ef3e408b758c4cee0ae3604980cdb9ef571453977e7a1f295321): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-zsv66"
Nov 25 09:54:55 crc kubenswrapper[4769]: E1125 09:54:55.489592 4769 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-zsv66_openshift-operators_0c1565cf-9a88-4b55-a8b0-3ad109f1ca33_0(90674b66b574ef3e408b758c4cee0ae3604980cdb9ef571453977e7a1f295321): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-zsv66"
Nov 25 09:54:55 crc kubenswrapper[4769]: E1125 09:54:55.489650 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-668cf9dfbb-zsv66_openshift-operators(0c1565cf-9a88-4b55-a8b0-3ad109f1ca33)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-668cf9dfbb-zsv66_openshift-operators(0c1565cf-9a88-4b55-a8b0-3ad109f1ca33)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-zsv66_openshift-operators_0c1565cf-9a88-4b55-a8b0-3ad109f1ca33_0(90674b66b574ef3e408b758c4cee0ae3604980cdb9ef571453977e7a1f295321): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-zsv66" podUID="0c1565cf-9a88-4b55-a8b0-3ad109f1ca33"
Nov 25 09:54:55 crc kubenswrapper[4769]: E1125 09:54:55.516220 4769 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc_openshift-operators_35d3c6d2-4aea-4dbf-9e0f-6fc9fe4933ff_0(5c58d7aa5ef069aa896f559e04679beaf611e0c5bf5a65877263f3bdef9627f1): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 25 09:54:55 crc kubenswrapper[4769]: E1125 09:54:55.516317 4769 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc_openshift-operators_35d3c6d2-4aea-4dbf-9e0f-6fc9fe4933ff_0(5c58d7aa5ef069aa896f559e04679beaf611e0c5bf5a65877263f3bdef9627f1): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc"
Nov 25 09:54:55 crc kubenswrapper[4769]: E1125 09:54:55.516354 4769 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc_openshift-operators_35d3c6d2-4aea-4dbf-9e0f-6fc9fe4933ff_0(5c58d7aa5ef069aa896f559e04679beaf611e0c5bf5a65877263f3bdef9627f1): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc"
Nov 25 09:54:55 crc kubenswrapper[4769]: E1125 09:54:55.516426 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc_openshift-operators(35d3c6d2-4aea-4dbf-9e0f-6fc9fe4933ff)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc_openshift-operators(35d3c6d2-4aea-4dbf-9e0f-6fc9fe4933ff)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc_openshift-operators_35d3c6d2-4aea-4dbf-9e0f-6fc9fe4933ff_0(5c58d7aa5ef069aa896f559e04679beaf611e0c5bf5a65877263f3bdef9627f1): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc" podUID="35d3c6d2-4aea-4dbf-9e0f-6fc9fe4933ff"
Nov 25 09:54:55 crc kubenswrapper[4769]: E1125 09:54:55.516848 4769 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-qfmdd_openshift-operators_9b8ab5a0-3150-4f13-a8a1-4f8859eb1195_0(ab851334148ddb8c0b3b9a26fd1d71d21582fab5a1b1de18bc3dc744a03d5dce): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 25 09:54:55 crc kubenswrapper[4769]: E1125 09:54:55.516904 4769 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-qfmdd_openshift-operators_9b8ab5a0-3150-4f13-a8a1-4f8859eb1195_0(ab851334148ddb8c0b3b9a26fd1d71d21582fab5a1b1de18bc3dc744a03d5dce): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-qfmdd"
Nov 25 09:54:55 crc kubenswrapper[4769]: E1125 09:54:55.516925 4769 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-qfmdd_openshift-operators_9b8ab5a0-3150-4f13-a8a1-4f8859eb1195_0(ab851334148ddb8c0b3b9a26fd1d71d21582fab5a1b1de18bc3dc744a03d5dce): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-qfmdd"
Nov 25 09:54:55 crc kubenswrapper[4769]: E1125 09:54:55.516982 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"perses-operator-5446b9c989-qfmdd_openshift-operators(9b8ab5a0-3150-4f13-a8a1-4f8859eb1195)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"perses-operator-5446b9c989-qfmdd_openshift-operators(9b8ab5a0-3150-4f13-a8a1-4f8859eb1195)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-qfmdd_openshift-operators_9b8ab5a0-3150-4f13-a8a1-4f8859eb1195_0(ab851334148ddb8c0b3b9a26fd1d71d21582fab5a1b1de18bc3dc744a03d5dce): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/perses-operator-5446b9c989-qfmdd" podUID="9b8ab5a0-3150-4f13-a8a1-4f8859eb1195"
Nov 25 09:54:55 crc kubenswrapper[4769]: E1125 09:54:55.542321 4769 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226_openshift-operators_f5366bf6-8a99-4762-86f8-e0d0b94c19ed_0(b83b7063cae7a7f53214a4ddca62dd8d34c57a639724c5f896ee3d3f1b79b36e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 25 09:54:55 crc kubenswrapper[4769]: E1125 09:54:55.542398 4769 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226_openshift-operators_f5366bf6-8a99-4762-86f8-e0d0b94c19ed_0(b83b7063cae7a7f53214a4ddca62dd8d34c57a639724c5f896ee3d3f1b79b36e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226"
Nov 25 09:54:55 crc kubenswrapper[4769]: E1125 09:54:55.542427 4769 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226_openshift-operators_f5366bf6-8a99-4762-86f8-e0d0b94c19ed_0(b83b7063cae7a7f53214a4ddca62dd8d34c57a639724c5f896ee3d3f1b79b36e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226"
Nov 25 09:54:55 crc kubenswrapper[4769]: E1125 09:54:55.542496 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226_openshift-operators(f5366bf6-8a99-4762-86f8-e0d0b94c19ed)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226_openshift-operators(f5366bf6-8a99-4762-86f8-e0d0b94c19ed)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226_openshift-operators_f5366bf6-8a99-4762-86f8-e0d0b94c19ed_0(b83b7063cae7a7f53214a4ddca62dd8d34c57a639724c5f896ee3d3f1b79b36e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226" podUID="f5366bf6-8a99-4762-86f8-e0d0b94c19ed"
Nov 25 09:54:55 crc kubenswrapper[4769]: E1125 09:54:55.554132 4769 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-l8nzd_openshift-operators_75c93e2f-793a-49b1-bd51-73adf5f2edaf_0(c31b6efe2828bb4d18f1861c3e39318cc1771595b89900ecea31490b8072bc44): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 25 09:54:55 crc kubenswrapper[4769]: E1125 09:54:55.554213 4769 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-l8nzd_openshift-operators_75c93e2f-793a-49b1-bd51-73adf5f2edaf_0(c31b6efe2828bb4d18f1861c3e39318cc1771595b89900ecea31490b8072bc44): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-d8bb48f5d-l8nzd"
Nov 25 09:54:55 crc kubenswrapper[4769]: E1125 09:54:55.554238 4769 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-l8nzd_openshift-operators_75c93e2f-793a-49b1-bd51-73adf5f2edaf_0(c31b6efe2828bb4d18f1861c3e39318cc1771595b89900ecea31490b8072bc44): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-d8bb48f5d-l8nzd"
Nov 25 09:54:55 crc kubenswrapper[4769]: E1125 09:54:55.554283 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"observability-operator-d8bb48f5d-l8nzd_openshift-operators(75c93e2f-793a-49b1-bd51-73adf5f2edaf)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"observability-operator-d8bb48f5d-l8nzd_openshift-operators(75c93e2f-793a-49b1-bd51-73adf5f2edaf)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-l8nzd_openshift-operators_75c93e2f-793a-49b1-bd51-73adf5f2edaf_0(c31b6efe2828bb4d18f1861c3e39318cc1771595b89900ecea31490b8072bc44): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/observability-operator-d8bb48f5d-l8nzd" podUID="75c93e2f-793a-49b1-bd51-73adf5f2edaf"
Nov 25 09:54:59 crc kubenswrapper[4769]: I1125 09:54:59.237141 4769 scope.go:117] "RemoveContainer" containerID="cc82a4e087ecb46a40d5a0406ed6677bd0f4e3c88a444882e56d3e955ab8749a"
Nov 25 09:54:59 crc kubenswrapper[4769]: E1125 09:54:59.239525 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-s47tv_openshift-multus(025219f0-bc69-4a33-acaa-b055607272bb)\"" pod="openshift-multus/multus-s47tv" podUID="025219f0-bc69-4a33-acaa-b055607272bb"
Nov 25 09:55:07 crc kubenswrapper[4769]: I1125 09:55:07.236814 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226"
Nov 25 09:55:07 crc kubenswrapper[4769]: I1125 09:55:07.239171 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226"
Nov 25 09:55:07 crc kubenswrapper[4769]: E1125 09:55:07.269066 4769 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226_openshift-operators_f5366bf6-8a99-4762-86f8-e0d0b94c19ed_0(02807be4c128a16198f2ac1421d2273fc572a100fad970533b3643e466a97c6d): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 25 09:55:07 crc kubenswrapper[4769]: E1125 09:55:07.269574 4769 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226_openshift-operators_f5366bf6-8a99-4762-86f8-e0d0b94c19ed_0(02807be4c128a16198f2ac1421d2273fc572a100fad970533b3643e466a97c6d): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226"
Nov 25 09:55:07 crc kubenswrapper[4769]: E1125 09:55:07.269600 4769 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226_openshift-operators_f5366bf6-8a99-4762-86f8-e0d0b94c19ed_0(02807be4c128a16198f2ac1421d2273fc572a100fad970533b3643e466a97c6d): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226"
Nov 25 09:55:07 crc kubenswrapper[4769]: E1125 09:55:07.269657 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226_openshift-operators(f5366bf6-8a99-4762-86f8-e0d0b94c19ed)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226_openshift-operators(f5366bf6-8a99-4762-86f8-e0d0b94c19ed)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226_openshift-operators_f5366bf6-8a99-4762-86f8-e0d0b94c19ed_0(02807be4c128a16198f2ac1421d2273fc572a100fad970533b3643e466a97c6d): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226" podUID="f5366bf6-8a99-4762-86f8-e0d0b94c19ed"
Nov 25 09:55:09 crc kubenswrapper[4769]: I1125 09:55:09.236058 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc"
Nov 25 09:55:09 crc kubenswrapper[4769]: I1125 09:55:09.238246 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc"
Nov 25 09:55:09 crc kubenswrapper[4769]: E1125 09:55:09.266154 4769 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc_openshift-operators_35d3c6d2-4aea-4dbf-9e0f-6fc9fe4933ff_0(bac5b0bf69a2e0a5a6d17cd1386581dd154a49b58d170c41e0fe70d35915efe2): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 25 09:55:09 crc kubenswrapper[4769]: E1125 09:55:09.266588 4769 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc_openshift-operators_35d3c6d2-4aea-4dbf-9e0f-6fc9fe4933ff_0(bac5b0bf69a2e0a5a6d17cd1386581dd154a49b58d170c41e0fe70d35915efe2): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc"
Nov 25 09:55:09 crc kubenswrapper[4769]: E1125 09:55:09.266620 4769 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc_openshift-operators_35d3c6d2-4aea-4dbf-9e0f-6fc9fe4933ff_0(bac5b0bf69a2e0a5a6d17cd1386581dd154a49b58d170c41e0fe70d35915efe2): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc"
Nov 25 09:55:09 crc kubenswrapper[4769]: E1125 09:55:09.266670 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc_openshift-operators(35d3c6d2-4aea-4dbf-9e0f-6fc9fe4933ff)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc_openshift-operators(35d3c6d2-4aea-4dbf-9e0f-6fc9fe4933ff)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc_openshift-operators_35d3c6d2-4aea-4dbf-9e0f-6fc9fe4933ff_0(bac5b0bf69a2e0a5a6d17cd1386581dd154a49b58d170c41e0fe70d35915efe2): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc" podUID="35d3c6d2-4aea-4dbf-9e0f-6fc9fe4933ff"
Nov 25 09:55:10 crc kubenswrapper[4769]: I1125 09:55:10.236986 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-qfmdd"
Nov 25 09:55:10 crc kubenswrapper[4769]: I1125 09:55:10.236999 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-l8nzd"
Nov 25 09:55:10 crc kubenswrapper[4769]: I1125 09:55:10.237135 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-zsv66"
Nov 25 09:55:10 crc kubenswrapper[4769]: I1125 09:55:10.237620 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-qfmdd"
Nov 25 09:55:10 crc kubenswrapper[4769]: I1125 09:55:10.237831 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-l8nzd"
Nov 25 09:55:10 crc kubenswrapper[4769]: I1125 09:55:10.237910 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-zsv66"
Nov 25 09:55:10 crc kubenswrapper[4769]: E1125 09:55:10.282209 4769 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-l8nzd_openshift-operators_75c93e2f-793a-49b1-bd51-73adf5f2edaf_0(079f0e8139cd7ea1d1d5ae18a456b55ca77919dddb95eff94aeff8c78b385a5e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 25 09:55:10 crc kubenswrapper[4769]: E1125 09:55:10.282295 4769 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-l8nzd_openshift-operators_75c93e2f-793a-49b1-bd51-73adf5f2edaf_0(079f0e8139cd7ea1d1d5ae18a456b55ca77919dddb95eff94aeff8c78b385a5e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-d8bb48f5d-l8nzd"
Nov 25 09:55:10 crc kubenswrapper[4769]: E1125 09:55:10.282325 4769 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-l8nzd_openshift-operators_75c93e2f-793a-49b1-bd51-73adf5f2edaf_0(079f0e8139cd7ea1d1d5ae18a456b55ca77919dddb95eff94aeff8c78b385a5e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-d8bb48f5d-l8nzd"
Nov 25 09:55:10 crc kubenswrapper[4769]: E1125 09:55:10.282381 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"observability-operator-d8bb48f5d-l8nzd_openshift-operators(75c93e2f-793a-49b1-bd51-73adf5f2edaf)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"observability-operator-d8bb48f5d-l8nzd_openshift-operators(75c93e2f-793a-49b1-bd51-73adf5f2edaf)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-l8nzd_openshift-operators_75c93e2f-793a-49b1-bd51-73adf5f2edaf_0(079f0e8139cd7ea1d1d5ae18a456b55ca77919dddb95eff94aeff8c78b385a5e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/observability-operator-d8bb48f5d-l8nzd" podUID="75c93e2f-793a-49b1-bd51-73adf5f2edaf"
Nov 25 09:55:10 crc kubenswrapper[4769]: E1125 09:55:10.291731 4769 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-zsv66_openshift-operators_0c1565cf-9a88-4b55-a8b0-3ad109f1ca33_0(b27c392ef59ecd2add9843f34cf731240fa7b157515f19b2908f6e33e210a05c): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 25 09:55:10 crc kubenswrapper[4769]: E1125 09:55:10.291801 4769 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-zsv66_openshift-operators_0c1565cf-9a88-4b55-a8b0-3ad109f1ca33_0(b27c392ef59ecd2add9843f34cf731240fa7b157515f19b2908f6e33e210a05c): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-zsv66"
Nov 25 09:55:10 crc kubenswrapper[4769]: E1125 09:55:10.291833 4769 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-zsv66_openshift-operators_0c1565cf-9a88-4b55-a8b0-3ad109f1ca33_0(b27c392ef59ecd2add9843f34cf731240fa7b157515f19b2908f6e33e210a05c): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-zsv66"
Nov 25 09:55:10 crc kubenswrapper[4769]: E1125 09:55:10.291891 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-668cf9dfbb-zsv66_openshift-operators(0c1565cf-9a88-4b55-a8b0-3ad109f1ca33)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-668cf9dfbb-zsv66_openshift-operators(0c1565cf-9a88-4b55-a8b0-3ad109f1ca33)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-zsv66_openshift-operators_0c1565cf-9a88-4b55-a8b0-3ad109f1ca33_0(b27c392ef59ecd2add9843f34cf731240fa7b157515f19b2908f6e33e210a05c): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-zsv66" podUID="0c1565cf-9a88-4b55-a8b0-3ad109f1ca33"
Nov 25 09:55:10 crc kubenswrapper[4769]: E1125 09:55:10.301456 4769 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-qfmdd_openshift-operators_9b8ab5a0-3150-4f13-a8a1-4f8859eb1195_0(a5c3d9b5ea5e4787d9184f352c46367aa50ae66e278ec4bb3b3ce246f886df73): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 25 09:55:10 crc kubenswrapper[4769]: E1125 09:55:10.301537 4769 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-qfmdd_openshift-operators_9b8ab5a0-3150-4f13-a8a1-4f8859eb1195_0(a5c3d9b5ea5e4787d9184f352c46367aa50ae66e278ec4bb3b3ce246f886df73): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-qfmdd"
Nov 25 09:55:10 crc kubenswrapper[4769]: E1125 09:55:10.301561 4769 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-qfmdd_openshift-operators_9b8ab5a0-3150-4f13-a8a1-4f8859eb1195_0(a5c3d9b5ea5e4787d9184f352c46367aa50ae66e278ec4bb3b3ce246f886df73): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-qfmdd"
Nov 25 09:55:10 crc kubenswrapper[4769]: E1125 09:55:10.301628 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"perses-operator-5446b9c989-qfmdd_openshift-operators(9b8ab5a0-3150-4f13-a8a1-4f8859eb1195)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"perses-operator-5446b9c989-qfmdd_openshift-operators(9b8ab5a0-3150-4f13-a8a1-4f8859eb1195)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-qfmdd_openshift-operators_9b8ab5a0-3150-4f13-a8a1-4f8859eb1195_0(a5c3d9b5ea5e4787d9184f352c46367aa50ae66e278ec4bb3b3ce246f886df73): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/perses-operator-5446b9c989-qfmdd" podUID="9b8ab5a0-3150-4f13-a8a1-4f8859eb1195"
Nov 25 09:55:14 crc kubenswrapper[4769]: I1125 09:55:14.236590 4769 scope.go:117] "RemoveContainer" containerID="cc82a4e087ecb46a40d5a0406ed6677bd0f4e3c88a444882e56d3e955ab8749a"
Nov 25 09:55:14 crc kubenswrapper[4769]: I1125 09:55:14.505506 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-s47tv_025219f0-bc69-4a33-acaa-b055607272bb/kube-multus/2.log"
Nov 25 09:55:14 crc kubenswrapper[4769]: I1125 09:55:14.505979 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-s47tv" event={"ID":"025219f0-bc69-4a33-acaa-b055607272bb","Type":"ContainerStarted","Data":"d4a6f688b0c8b426a58ea434868712199c1bbf2387f32034d04b0e6fe187a366"}
Nov 25 09:55:16 crc kubenswrapper[4769]: I1125 09:55:16.200844 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-5bk62"
Nov 25 09:55:18 crc kubenswrapper[4769]: I1125 09:55:18.236436 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226"
Nov 25 09:55:18 crc kubenswrapper[4769]: I1125 09:55:18.237622 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226"
Nov 25 09:55:18 crc kubenswrapper[4769]: I1125 09:55:18.475428 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226"]
Nov 25 09:55:18 crc kubenswrapper[4769]: I1125 09:55:18.532409 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226" event={"ID":"f5366bf6-8a99-4762-86f8-e0d0b94c19ed","Type":"ContainerStarted","Data":"d2cd4dd043fcc8bd1d7aa601446772ceb20487ec489871f2f9c85861011053f9"}
Nov 25 09:55:20 crc kubenswrapper[4769]: I1125 09:55:20.236183 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc"
Nov 25 09:55:20 crc kubenswrapper[4769]: I1125 09:55:20.236776 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc"
Nov 25 09:55:20 crc kubenswrapper[4769]: I1125 09:55:20.450046 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc"]
Nov 25 09:55:20 crc kubenswrapper[4769]: I1125 09:55:20.548259 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc" event={"ID":"35d3c6d2-4aea-4dbf-9e0f-6fc9fe4933ff","Type":"ContainerStarted","Data":"eac1ce5ac43fb9f145b62dbb8ce0eddddfd04d20e7e68d07bc3f9d62225db184"}
Nov 25 09:55:24 crc kubenswrapper[4769]: I1125 09:55:24.236236 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-qfmdd"
Nov 25 09:55:24 crc kubenswrapper[4769]: I1125 09:55:24.237570 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-qfmdd"
Nov 25 09:55:25 crc kubenswrapper[4769]: I1125 09:55:25.235855 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-l8nzd"
Nov 25 09:55:25 crc kubenswrapper[4769]: I1125 09:55:25.235874 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-zsv66"
Nov 25 09:55:25 crc kubenswrapper[4769]: I1125 09:55:25.236991 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-l8nzd"
Nov 25 09:55:25 crc kubenswrapper[4769]: I1125 09:55:25.237057 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-zsv66"
Nov 25 09:55:25 crc kubenswrapper[4769]: I1125 09:55:25.264379 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-qfmdd"]
Nov 25 09:55:25 crc kubenswrapper[4769]: I1125 09:55:25.498494 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-l8nzd"]
Nov 25 09:55:25 crc kubenswrapper[4769]: I1125 09:55:25.550419 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-zsv66"]
Nov 25 09:55:25 crc kubenswrapper[4769]: W1125 09:55:25.557672 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0c1565cf_9a88_4b55_a8b0_3ad109f1ca33.slice/crio-eb6a20f45de84a8623a1b5d5f1d0a2272c72142e2754347899f9f9c3cb3eac30 WatchSource:0}: Error finding container eb6a20f45de84a8623a1b5d5f1d0a2272c72142e2754347899f9f9c3cb3eac30: Status 404 returned error can't find the container with id eb6a20f45de84a8623a1b5d5f1d0a2272c72142e2754347899f9f9c3cb3eac30
Nov 25 09:55:25 crc kubenswrapper[4769]: I1125 09:55:25.582218 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-l8nzd" event={"ID":"75c93e2f-793a-49b1-bd51-73adf5f2edaf","Type":"ContainerStarted","Data":"93ff74e76c3cfcb30756298f9381121368cb2a1e07446e62dfae341a68ef07c1"}
Nov 25 09:55:25 crc kubenswrapper[4769]: I1125 09:55:25.584177 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc" event={"ID":"35d3c6d2-4aea-4dbf-9e0f-6fc9fe4933ff","Type":"ContainerStarted","Data":"4115d4a6a2f1f89bdde972b2d65722e9a9ec99615b80b89712e79a8ad555a7f3"}
Nov 25 09:55:25 crc kubenswrapper[4769]: I1125 09:55:25.586204 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-zsv66" event={"ID":"0c1565cf-9a88-4b55-a8b0-3ad109f1ca33","Type":"ContainerStarted","Data":"eb6a20f45de84a8623a1b5d5f1d0a2272c72142e2754347899f9f9c3cb3eac30"}
Nov 25 09:55:25 crc kubenswrapper[4769]: I1125 09:55:25.588191 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226" event={"ID":"f5366bf6-8a99-4762-86f8-e0d0b94c19ed","Type":"ContainerStarted","Data":"b385bb7b4968865889dcc4035b5b6b546f32baa39a2fd75265006abb5765f80c"}
Nov 25 09:55:25 crc kubenswrapper[4769]: I1125 09:55:25.589064 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5446b9c989-qfmdd" event={"ID":"9b8ab5a0-3150-4f13-a8a1-4f8859eb1195","Type":"ContainerStarted","Data":"ac1ea29277610e8bbe878823dbdc8babab68a6057008cb2c519dd13b3f8301aa"}
Nov 25 09:55:25 crc kubenswrapper[4769]: I1125 09:55:25.610938 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc" podStartSLOduration=30.080454537 podStartE2EDuration="34.610910366s" podCreationTimestamp="2025-11-25 09:54:51 +0000 UTC" firstStartedPulling="2025-11-25 09:55:20.462690619 +0000 UTC m=+669.047662932" lastFinishedPulling="2025-11-25 09:55:24.993146448 +0000 UTC m=+673.578118761" observedRunningTime="2025-11-25 09:55:25.60260576 +0000 UTC m=+674.187578103" watchObservedRunningTime="2025-11-25 09:55:25.610910366 +0000 UTC m=+674.195882679"
Nov 25 09:55:25 crc kubenswrapper[4769]: I1125 09:55:25.636489 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226" podStartSLOduration=28.162475221 podStartE2EDuration="34.636461422s" podCreationTimestamp="2025-11-25 09:54:51 +0000 UTC" firstStartedPulling="2025-11-25 09:55:18.494540517 +0000 UTC m=+667.079512830" lastFinishedPulling="2025-11-25 09:55:24.968526718 +0000 UTC m=+673.553499031" observedRunningTime="2025-11-25 09:55:25.632007591 +0000 UTC m=+674.216979914" watchObservedRunningTime="2025-11-25 09:55:25.636461422 +0000 UTC m=+674.221433735"
Nov 25 09:55:29 crc kubenswrapper[4769]: I1125 09:55:29.630432 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5446b9c989-qfmdd" event={"ID":"9b8ab5a0-3150-4f13-a8a1-4f8859eb1195","Type":"ContainerStarted","Data":"a7eb548bcf9e5c5e0bb0124cbc217b332c1982f28ea3843bf2759bb1be23ee9e"}
Nov 25 09:55:29 crc kubenswrapper[4769]: I1125 09:55:29.631238 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/perses-operator-5446b9c989-qfmdd"
Nov 25 09:55:29 crc kubenswrapper[4769]: I1125 09:55:29.633501 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-zsv66" event={"ID":"0c1565cf-9a88-4b55-a8b0-3ad109f1ca33","Type":"ContainerStarted","Data":"37b4f631f811a9e912e6264a3b2987c9ba65fa28fbca3e8395b34f95aff1f463"}
Nov 25 09:55:29 crc kubenswrapper[4769]: I1125 09:55:29.663442 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/perses-operator-5446b9c989-qfmdd" podStartSLOduration=35.42720631 podStartE2EDuration="38.663413593s" podCreationTimestamp="2025-11-25 09:54:51 +0000 UTC" firstStartedPulling="2025-11-25 09:55:25.289779724 +0000 UTC m=+673.874752037" lastFinishedPulling="2025-11-25 09:55:28.525987007 +0000 UTC m=+677.110959320" observedRunningTime="2025-11-25 09:55:29.657315167 +0000 UTC m=+678.242287490" watchObservedRunningTime="2025-11-25 09:55:29.663413593 +0000 UTC m=+678.248385906"
Nov 25 09:55:29 crc kubenswrapper[4769]: I1125 09:55:29.677617 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-zsv66" podStartSLOduration=35.706977286 podStartE2EDuration="38.677586939s" podCreationTimestamp="2025-11-25 09:54:51 +0000 UTC" firstStartedPulling="2025-11-25 09:55:25.561050709 +0000 UTC m=+674.146023022" lastFinishedPulling="2025-11-25 09:55:28.531660362 +0000 UTC m=+677.116632675" observedRunningTime="2025-11-25 09:55:29.67577457 +0000 UTC m=+678.260746903" watchObservedRunningTime="2025-11-25 09:55:29.677586939 +0000 UTC m=+678.262559252"
Nov 25 09:55:32 crc kubenswrapper[4769]: I1125 09:55:32.664765 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-l8nzd" event={"ID":"75c93e2f-793a-49b1-bd51-73adf5f2edaf","Type":"ContainerStarted","Data":"f1a4c57d21098fa4df895e54feb3259e571442ecb90f2f3f4043547753efb838"}
Nov 25 09:55:32 crc kubenswrapper[4769]: I1125 09:55:32.665301 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/observability-operator-d8bb48f5d-l8nzd"
Nov 25 09:55:32 crc kubenswrapper[4769]: I1125 09:55:32.667257 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/observability-operator-d8bb48f5d-l8nzd"
Nov 25 09:55:32 crc kubenswrapper[4769]: I1125 09:55:32.698645 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-operator-d8bb48f5d-l8nzd" podStartSLOduration=35.641778399 podStartE2EDuration="41.698618064s" podCreationTimestamp="2025-11-25 09:54:51 +0000 UTC" firstStartedPulling="2025-11-25 09:55:25.515852858 +0000 UTC m=+674.100825171" lastFinishedPulling="2025-11-25 09:55:31.572692523 +0000 UTC m=+680.157664836" observedRunningTime="2025-11-25 09:55:32.698252614 +0000 UTC m=+681.283224927" watchObservedRunningTime="2025-11-25 09:55:32.698618064 +0000 UTC m=+681.283590377"
Nov 25 09:55:41 crc kubenswrapper[4769]: I1125 09:55:41.861995 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/perses-operator-5446b9c989-qfmdd"
Nov 25 09:55:42 crc kubenswrapper[4769]: I1125 09:55:42.097645 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-fwc8s"]
Nov 25 09:55:42 crc kubenswrapper[4769]: I1125 09:55:42.099041 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-fwc8s"
Nov 25 09:55:42 crc kubenswrapper[4769]: I1125 09:55:42.100885 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt"
Nov 25 09:55:42 crc kubenswrapper[4769]: I1125 09:55:42.102599 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt"
Nov 25 09:55:42 crc kubenswrapper[4769]: I1125 09:55:42.104814 4769 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-l6q4f"
Nov 25 09:55:42 crc kubenswrapper[4769]: I1125 09:55:42.115504 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-5b446d88c5-662dt"]
Nov 25 09:55:42 crc kubenswrapper[4769]: I1125 09:55:42.116605 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-662dt"
Nov 25 09:55:42 crc kubenswrapper[4769]: I1125 09:55:42.122627 4769 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-plqb6"
Nov 25 09:55:42 crc kubenswrapper[4769]: I1125 09:55:42.125005 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-fwc8s"]
Nov 25 09:55:42 crc kubenswrapper[4769]: I1125 09:55:42.129653 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-662dt"]
Nov 25 09:55:42 crc kubenswrapper[4769]: I1125 09:55:42.135939 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-q4rr6"]
Nov 25 09:55:42 crc kubenswrapper[4769]: I1125 09:55:42.136982 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-q4rr6"
Nov 25 09:55:42 crc kubenswrapper[4769]: I1125 09:55:42.141016 4769 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-p4cx9"
Nov 25 09:55:42 crc kubenswrapper[4769]: I1125 09:55:42.145662 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-q4rr6"]
Nov 25 09:55:42 crc kubenswrapper[4769]: I1125 09:55:42.242192 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q4vdw\" (UniqueName: \"kubernetes.io/projected/c4701907-0ccc-4866-bb98-6005539baa95-kube-api-access-q4vdw\") pod \"cert-manager-cainjector-7f985d654d-fwc8s\" (UID: \"c4701907-0ccc-4866-bb98-6005539baa95\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-fwc8s"
Nov 25 09:55:42 crc kubenswrapper[4769]: I1125 09:55:42.242250 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wxrlz\" (UniqueName: \"kubernetes.io/projected/0c37859f-e681-4cfe-b687-967b67a62534-kube-api-access-wxrlz\") pod \"cert-manager-webhook-5655c58dd6-q4rr6\" (UID: \"0c37859f-e681-4cfe-b687-967b67a62534\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-q4rr6"
Nov 25 09:55:42 crc kubenswrapper[4769]: I1125 09:55:42.242273 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-thbxb\" (UniqueName: \"kubernetes.io/projected/8002485a-0573-48c9-aeac-f2f5a05cb1ae-kube-api-access-thbxb\") pod \"cert-manager-5b446d88c5-662dt\" (UID: \"8002485a-0573-48c9-aeac-f2f5a05cb1ae\") " pod="cert-manager/cert-manager-5b446d88c5-662dt"
Nov 25 09:55:42 crc kubenswrapper[4769]: I1125 09:55:42.344264 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q4vdw\" (UniqueName: \"kubernetes.io/projected/c4701907-0ccc-4866-bb98-6005539baa95-kube-api-access-q4vdw\") pod \"cert-manager-cainjector-7f985d654d-fwc8s\" (UID: \"c4701907-0ccc-4866-bb98-6005539baa95\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-fwc8s"
Nov 25 09:55:42 crc kubenswrapper[4769]: I1125 09:55:42.344344 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wxrlz\" (UniqueName: \"kubernetes.io/projected/0c37859f-e681-4cfe-b687-967b67a62534-kube-api-access-wxrlz\") pod \"cert-manager-webhook-5655c58dd6-q4rr6\" (UID: \"0c37859f-e681-4cfe-b687-967b67a62534\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-q4rr6"
Nov 25 09:55:42 crc kubenswrapper[4769]: I1125 09:55:42.344373 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-thbxb\" (UniqueName: \"kubernetes.io/projected/8002485a-0573-48c9-aeac-f2f5a05cb1ae-kube-api-access-thbxb\") pod \"cert-manager-5b446d88c5-662dt\" (UID: \"8002485a-0573-48c9-aeac-f2f5a05cb1ae\") " pod="cert-manager/cert-manager-5b446d88c5-662dt"
Nov 25 09:55:42 crc kubenswrapper[4769]: I1125 09:55:42.374202 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q4vdw\" (UniqueName: \"kubernetes.io/projected/c4701907-0ccc-4866-bb98-6005539baa95-kube-api-access-q4vdw\") pod \"cert-manager-cainjector-7f985d654d-fwc8s\" (UID: \"c4701907-0ccc-4866-bb98-6005539baa95\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-fwc8s"
Nov 25 09:55:42 crc kubenswrapper[4769]: I1125 09:55:42.374288 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wxrlz\" (UniqueName: \"kubernetes.io/projected/0c37859f-e681-4cfe-b687-967b67a62534-kube-api-access-wxrlz\") pod \"cert-manager-webhook-5655c58dd6-q4rr6\" (UID: \"0c37859f-e681-4cfe-b687-967b67a62534\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-q4rr6"
Nov 25 09:55:42 crc kubenswrapper[4769]: I1125 09:55:42.380018 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-thbxb\" (UniqueName: \"kubernetes.io/projected/8002485a-0573-48c9-aeac-f2f5a05cb1ae-kube-api-access-thbxb\") pod \"cert-manager-5b446d88c5-662dt\" (UID: \"8002485a-0573-48c9-aeac-f2f5a05cb1ae\") " pod="cert-manager/cert-manager-5b446d88c5-662dt"
Nov 25 09:55:42 crc kubenswrapper[4769]: I1125 09:55:42.415293 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-fwc8s"
Nov 25 09:55:42 crc kubenswrapper[4769]: I1125 09:55:42.433562 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-662dt"
Nov 25 09:55:42 crc kubenswrapper[4769]: I1125 09:55:42.464399 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-q4rr6"
Nov 25 09:55:42 crc kubenswrapper[4769]: I1125 09:55:42.939037 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-fwc8s"]
Nov 25 09:55:42 crc kubenswrapper[4769]: I1125 09:55:42.951815 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-662dt"]
Nov 25 09:55:43 crc kubenswrapper[4769]: I1125 09:55:43.006029 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-q4rr6"]
Nov 25 09:55:43 crc kubenswrapper[4769]: W1125 09:55:43.008629 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0c37859f_e681_4cfe_b687_967b67a62534.slice/crio-bc41200237732ce2c1ce948ddf4ff552263586de8d2927620bf147d7207b1ff0 WatchSource:0}: Error finding container bc41200237732ce2c1ce948ddf4ff552263586de8d2927620bf147d7207b1ff0: Status 404 returned error can't find the container with id bc41200237732ce2c1ce948ddf4ff552263586de8d2927620bf147d7207b1ff0
Nov 25 09:55:43 crc kubenswrapper[4769]: I1125 09:55:43.744987 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-662dt" event={"ID":"8002485a-0573-48c9-aeac-f2f5a05cb1ae","Type":"ContainerStarted","Data":"f1616529c713236ac65f8fb45e954eeee25bf303778d3ce63378f526699b3488"}
Nov 25 09:55:43 crc kubenswrapper[4769]: I1125 09:55:43.747697 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-fwc8s" event={"ID":"c4701907-0ccc-4866-bb98-6005539baa95","Type":"ContainerStarted","Data":"a9bc831ffc8ebaee53c97acc638113b1e92eb04fee0a2ef391c9b9f395f3abb9"}
Nov 25 09:55:43 crc kubenswrapper[4769]: I1125 09:55:43.749783 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-q4rr6" event={"ID":"0c37859f-e681-4cfe-b687-967b67a62534","Type":"ContainerStarted","Data":"bc41200237732ce2c1ce948ddf4ff552263586de8d2927620bf147d7207b1ff0"}
Nov 25 09:55:48 crc kubenswrapper[4769]: I1125 09:55:48.795675 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-fwc8s" event={"ID":"c4701907-0ccc-4866-bb98-6005539baa95","Type":"ContainerStarted","Data":"b18182edde172f1e0a1f70d42df5bb2b91997cb1df1e264b4f617b7507df06d8"}
Nov 25 09:55:48 crc kubenswrapper[4769]: I1125 09:55:48.798847 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-q4rr6" event={"ID":"0c37859f-e681-4cfe-b687-967b67a62534","Type":"ContainerStarted","Data":"f009a2000d3e73e3a30f009556db5371359a69870f80e9f569b4a1214568a0c1"}
Nov 25 09:55:48 crc kubenswrapper[4769]: I1125 09:55:48.798927 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-5655c58dd6-q4rr6"
Nov 25 09:55:48 crc kubenswrapper[4769]: I1125 09:55:48.800418 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-662dt" event={"ID":"8002485a-0573-48c9-aeac-f2f5a05cb1ae","Type":"ContainerStarted","Data":"ecb89a83f4fb31c70eae254318d1595d248d0c18445808dacb1d67f50209f547"}
Nov 25 09:55:48 crc kubenswrapper[4769]: I1125 09:55:48.818579 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-7f985d654d-fwc8s" podStartSLOduration=1.589018808 podStartE2EDuration="6.818534229s" podCreationTimestamp="2025-11-25 09:55:42 +0000 UTC" firstStartedPulling="2025-11-25 09:55:42.94702513 +0000 UTC m=+691.531997443" lastFinishedPulling="2025-11-25 09:55:48.176540551 +0000 UTC m=+696.761512864" observedRunningTime="2025-11-25 09:55:48.815561058 +0000 UTC m=+697.400533371" watchObservedRunningTime="2025-11-25 09:55:48.818534229 +0000 UTC m=+697.403506542"
Nov 25 09:55:48 crc kubenswrapper[4769]: I1125 09:55:48.840068 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-5b446d88c5-662dt" podStartSLOduration=1.60193912 podStartE2EDuration="6.840049114s" podCreationTimestamp="2025-11-25 09:55:42 +0000 UTC" firstStartedPulling="2025-11-25 09:55:42.947259647 +0000 UTC m=+691.532231970" lastFinishedPulling="2025-11-25 09:55:48.185369651 +0000 UTC m=+696.770341964" observedRunningTime="2025-11-25 09:55:48.836511768 +0000 UTC m=+697.421484091" watchObservedRunningTime="2025-11-25 09:55:48.840049114 +0000 UTC m=+697.425021427"
Nov 25 09:55:48 crc kubenswrapper[4769]: I1125 09:55:48.874365 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-5655c58dd6-q4rr6" podStartSLOduration=1.711317118 podStartE2EDuration="6.874338778s" podCreationTimestamp="2025-11-25 09:55:42 +0000 UTC" firstStartedPulling="2025-11-25 09:55:43.013230913 +0000 UTC m=+691.598203226" lastFinishedPulling="2025-11-25 09:55:48.176252573 +0000 UTC m=+696.761224886" observedRunningTime="2025-11-25 09:55:48.855700151 +0000 UTC m=+697.440672464" watchObservedRunningTime="2025-11-25 09:55:48.874338778 +0000 UTC m=+697.459311081"
Nov 25 09:55:57 crc kubenswrapper[4769]: I1125 09:55:57.469920 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-5655c58dd6-q4rr6"
Nov 25 09:56:22 crc kubenswrapper[4769]: I1125 09:56:22.290500 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 09:56:22 crc kubenswrapper[4769]: I1125 09:56:22.291311 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 09:56:27 crc kubenswrapper[4769]: I1125 09:56:27.494329 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fcfrjr"]
Nov 25 09:56:27 crc kubenswrapper[4769]: I1125 09:56:27.497514 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fcfrjr"
Nov 25 09:56:27 crc kubenswrapper[4769]: I1125 09:56:27.500797 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc"
Nov 25 09:56:27 crc kubenswrapper[4769]: I1125 09:56:27.508421 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fcfrjr"]
Nov 25 09:56:27 crc kubenswrapper[4769]: I1125 09:56:27.678089 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0d4ddb62-f09e-495e-bad9-dc38999cf28d-util\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fcfrjr\" (UID: \"0d4ddb62-f09e-495e-bad9-dc38999cf28d\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fcfrjr"
Nov 25 09:56:27 crc kubenswrapper[4769]: I1125 09:56:27.678229 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-26mlm\" (UniqueName: \"kubernetes.io/projected/0d4ddb62-f09e-495e-bad9-dc38999cf28d-kube-api-access-26mlm\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fcfrjr\" (UID: \"0d4ddb62-f09e-495e-bad9-dc38999cf28d\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fcfrjr"
Nov 25 09:56:27 crc kubenswrapper[4769]: I1125 09:56:27.678290 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0d4ddb62-f09e-495e-bad9-dc38999cf28d-bundle\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fcfrjr\" (UID: \"0d4ddb62-f09e-495e-bad9-dc38999cf28d\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fcfrjr"
Nov 25 09:56:27 crc kubenswrapper[4769]: I1125 09:56:27.779340 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-26mlm\" (UniqueName: \"kubernetes.io/projected/0d4ddb62-f09e-495e-bad9-dc38999cf28d-kube-api-access-26mlm\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fcfrjr\" (UID: \"0d4ddb62-f09e-495e-bad9-dc38999cf28d\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fcfrjr"
Nov 25 09:56:27 crc kubenswrapper[4769]: I1125 09:56:27.779432 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0d4ddb62-f09e-495e-bad9-dc38999cf28d-bundle\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fcfrjr\" (UID: \"0d4ddb62-f09e-495e-bad9-dc38999cf28d\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fcfrjr"
Nov 25 09:56:27 crc kubenswrapper[4769]: I1125 09:56:27.779476 
4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0d4ddb62-f09e-495e-bad9-dc38999cf28d-util\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fcfrjr\" (UID: \"0d4ddb62-f09e-495e-bad9-dc38999cf28d\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fcfrjr" Nov 25 09:56:27 crc kubenswrapper[4769]: I1125 09:56:27.780004 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0d4ddb62-f09e-495e-bad9-dc38999cf28d-util\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fcfrjr\" (UID: \"0d4ddb62-f09e-495e-bad9-dc38999cf28d\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fcfrjr" Nov 25 09:56:27 crc kubenswrapper[4769]: I1125 09:56:27.780048 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0d4ddb62-f09e-495e-bad9-dc38999cf28d-bundle\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fcfrjr\" (UID: \"0d4ddb62-f09e-495e-bad9-dc38999cf28d\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fcfrjr" Nov 25 09:56:27 crc kubenswrapper[4769]: I1125 09:56:27.807185 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-26mlm\" (UniqueName: \"kubernetes.io/projected/0d4ddb62-f09e-495e-bad9-dc38999cf28d-kube-api-access-26mlm\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fcfrjr\" (UID: \"0d4ddb62-f09e-495e-bad9-dc38999cf28d\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fcfrjr" Nov 25 09:56:27 crc kubenswrapper[4769]: I1125 09:56:27.818649 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fcfrjr" Nov 25 09:56:27 crc kubenswrapper[4769]: I1125 09:56:27.913885 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb826l85"] Nov 25 09:56:27 crc kubenswrapper[4769]: I1125 09:56:27.915708 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb826l85" Nov 25 09:56:27 crc kubenswrapper[4769]: I1125 09:56:27.937069 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb826l85"] Nov 25 09:56:27 crc kubenswrapper[4769]: I1125 09:56:27.983232 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/029b4e7c-2a08-4a65-8226-904ac6eb536e-util\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb826l85\" (UID: \"029b4e7c-2a08-4a65-8226-904ac6eb536e\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb826l85" Nov 25 09:56:27 crc kubenswrapper[4769]: I1125 09:56:27.983280 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mkjcx\" (UniqueName: \"kubernetes.io/projected/029b4e7c-2a08-4a65-8226-904ac6eb536e-kube-api-access-mkjcx\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb826l85\" (UID: \"029b4e7c-2a08-4a65-8226-904ac6eb536e\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb826l85" Nov 25 09:56:27 crc kubenswrapper[4769]: I1125 09:56:27.983311 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/029b4e7c-2a08-4a65-8226-904ac6eb536e-bundle\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb826l85\" (UID: \"029b4e7c-2a08-4a65-8226-904ac6eb536e\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb826l85" Nov 25 09:56:28 crc kubenswrapper[4769]: I1125 09:56:28.086146 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/029b4e7c-2a08-4a65-8226-904ac6eb536e-util\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb826l85\" (UID: \"029b4e7c-2a08-4a65-8226-904ac6eb536e\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb826l85" Nov 25 09:56:28 crc kubenswrapper[4769]: I1125 09:56:28.086192 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mkjcx\" (UniqueName: \"kubernetes.io/projected/029b4e7c-2a08-4a65-8226-904ac6eb536e-kube-api-access-mkjcx\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb826l85\" (UID: \"029b4e7c-2a08-4a65-8226-904ac6eb536e\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb826l85" Nov 25 09:56:28 crc kubenswrapper[4769]: I1125 09:56:28.086227 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/029b4e7c-2a08-4a65-8226-904ac6eb536e-bundle\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb826l85\" (UID: \"029b4e7c-2a08-4a65-8226-904ac6eb536e\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb826l85" Nov 25 09:56:28 crc kubenswrapper[4769]: I1125 09:56:28.090076 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/029b4e7c-2a08-4a65-8226-904ac6eb536e-bundle\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb826l85\" (UID: \"029b4e7c-2a08-4a65-8226-904ac6eb536e\") " 
pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb826l85" Nov 25 09:56:28 crc kubenswrapper[4769]: I1125 09:56:28.090403 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/029b4e7c-2a08-4a65-8226-904ac6eb536e-util\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb826l85\" (UID: \"029b4e7c-2a08-4a65-8226-904ac6eb536e\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb826l85" Nov 25 09:56:28 crc kubenswrapper[4769]: I1125 09:56:28.120151 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mkjcx\" (UniqueName: \"kubernetes.io/projected/029b4e7c-2a08-4a65-8226-904ac6eb536e-kube-api-access-mkjcx\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb826l85\" (UID: \"029b4e7c-2a08-4a65-8226-904ac6eb536e\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb826l85" Nov 25 09:56:28 crc kubenswrapper[4769]: I1125 09:56:28.240750 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb826l85" Nov 25 09:56:28 crc kubenswrapper[4769]: I1125 09:56:28.318585 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fcfrjr"] Nov 25 09:56:28 crc kubenswrapper[4769]: W1125 09:56:28.330897 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d4ddb62_f09e_495e_bad9_dc38999cf28d.slice/crio-6ef6acb6284f18cda1f9cef416d4760b97b78fc5a6c8ee07046059d6027dae15 WatchSource:0}: Error finding container 6ef6acb6284f18cda1f9cef416d4760b97b78fc5a6c8ee07046059d6027dae15: Status 404 returned error can't find the container with id 6ef6acb6284f18cda1f9cef416d4760b97b78fc5a6c8ee07046059d6027dae15 Nov 25 09:56:28 crc kubenswrapper[4769]: I1125 09:56:28.515755 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb826l85"] Nov 25 09:56:29 crc kubenswrapper[4769]: I1125 09:56:29.150216 4769 generic.go:334] "Generic (PLEG): container finished" podID="029b4e7c-2a08-4a65-8226-904ac6eb536e" containerID="3d6c7fa4508b4b79af7fa125be7bc3c97c05e9a26b413a475ad72f8e00a01f81" exitCode=0 Nov 25 09:56:29 crc kubenswrapper[4769]: I1125 09:56:29.150337 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb826l85" event={"ID":"029b4e7c-2a08-4a65-8226-904ac6eb536e","Type":"ContainerDied","Data":"3d6c7fa4508b4b79af7fa125be7bc3c97c05e9a26b413a475ad72f8e00a01f81"} Nov 25 09:56:29 crc kubenswrapper[4769]: I1125 09:56:29.150746 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb826l85" event={"ID":"029b4e7c-2a08-4a65-8226-904ac6eb536e","Type":"ContainerStarted","Data":"103c29843562b64a4480c7f23c67b87f5b4f9efbeaaba04dce87fd86c0b11d55"} Nov 25 09:56:29 crc kubenswrapper[4769]: I1125 09:56:29.153580 4769 generic.go:334] "Generic (PLEG): container finished" podID="0d4ddb62-f09e-495e-bad9-dc38999cf28d" containerID="b9b214a87ee830a858b110e2ee687e312e286438df1d8dc6efb021b283b69662" exitCode=0 Nov 25 09:56:29 crc kubenswrapper[4769]: I1125 09:56:29.153632 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fcfrjr" event={"ID":"0d4ddb62-f09e-495e-bad9-dc38999cf28d","Type":"ContainerDied","Data":"b9b214a87ee830a858b110e2ee687e312e286438df1d8dc6efb021b283b69662"} Nov 25 09:56:29 crc kubenswrapper[4769]: I1125 09:56:29.153707 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fcfrjr" event={"ID":"0d4ddb62-f09e-495e-bad9-dc38999cf28d","Type":"ContainerStarted","Data":"6ef6acb6284f18cda1f9cef416d4760b97b78fc5a6c8ee07046059d6027dae15"} Nov 25 09:56:31 crc kubenswrapper[4769]: I1125 09:56:31.175247 4769 generic.go:334] "Generic (PLEG): container finished" podID="0d4ddb62-f09e-495e-bad9-dc38999cf28d" containerID="842ff841ca90960080eb17474f468e4a5d3f199899408127b4e370aba8f39f9c" exitCode=0 Nov 25 09:56:31 crc kubenswrapper[4769]: I1125 09:56:31.175395 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fcfrjr" event={"ID":"0d4ddb62-f09e-495e-bad9-dc38999cf28d","Type":"ContainerDied","Data":"842ff841ca90960080eb17474f468e4a5d3f199899408127b4e370aba8f39f9c"} Nov 25 09:56:31 crc kubenswrapper[4769]: I1125 09:56:31.178396 4769 generic.go:334] "Generic (PLEG): container finished" podID="029b4e7c-2a08-4a65-8226-904ac6eb536e" containerID="c62a07e50a979b06285915ca06ab154d61ceefbab42aff7743d3b6d74545dcbf" exitCode=0 Nov 25 09:56:31 crc kubenswrapper[4769]: I1125 09:56:31.178436 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb826l85" event={"ID":"029b4e7c-2a08-4a65-8226-904ac6eb536e","Type":"ContainerDied","Data":"c62a07e50a979b06285915ca06ab154d61ceefbab42aff7743d3b6d74545dcbf"} Nov 25 09:56:32 crc kubenswrapper[4769]: I1125 09:56:32.191770 4769 generic.go:334] "Generic (PLEG): container finished" podID="0d4ddb62-f09e-495e-bad9-dc38999cf28d" containerID="432102dc85d13ebd1bf09072c959259a0627f5a87169b6ee14e7dfbd707d89cd" exitCode=0 Nov 25 09:56:32 crc kubenswrapper[4769]: I1125 09:56:32.191834 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fcfrjr" event={"ID":"0d4ddb62-f09e-495e-bad9-dc38999cf28d","Type":"ContainerDied","Data":"432102dc85d13ebd1bf09072c959259a0627f5a87169b6ee14e7dfbd707d89cd"} Nov 25 09:56:32 crc kubenswrapper[4769]: I1125 09:56:32.195173 4769 generic.go:334] "Generic (PLEG): container finished" podID="029b4e7c-2a08-4a65-8226-904ac6eb536e" containerID="24e628e6013031de512f5d9e6f6fd8cdb93bc3b6ba999dc036b56a0f2c6e179a" exitCode=0 Nov 25 09:56:32 crc kubenswrapper[4769]: I1125 09:56:32.195217 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb826l85" event={"ID":"029b4e7c-2a08-4a65-8226-904ac6eb536e","Type":"ContainerDied","Data":"24e628e6013031de512f5d9e6f6fd8cdb93bc3b6ba999dc036b56a0f2c6e179a"} Nov 25 09:56:33 crc kubenswrapper[4769]: I1125 09:56:33.536809 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fcfrjr" Nov 25 09:56:33 crc kubenswrapper[4769]: I1125 09:56:33.550837 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb826l85" Nov 25 09:56:33 crc kubenswrapper[4769]: I1125 09:56:33.652743 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/029b4e7c-2a08-4a65-8226-904ac6eb536e-bundle\") pod \"029b4e7c-2a08-4a65-8226-904ac6eb536e\" (UID: \"029b4e7c-2a08-4a65-8226-904ac6eb536e\") " Nov 25 09:56:33 crc kubenswrapper[4769]: I1125 09:56:33.652832 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-26mlm\" (UniqueName: \"kubernetes.io/projected/0d4ddb62-f09e-495e-bad9-dc38999cf28d-kube-api-access-26mlm\") pod \"0d4ddb62-f09e-495e-bad9-dc38999cf28d\" (UID: \"0d4ddb62-f09e-495e-bad9-dc38999cf28d\") " Nov 25 09:56:33 crc kubenswrapper[4769]: I1125 09:56:33.652889 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0d4ddb62-f09e-495e-bad9-dc38999cf28d-bundle\") pod \"0d4ddb62-f09e-495e-bad9-dc38999cf28d\" (UID: \"0d4ddb62-f09e-495e-bad9-dc38999cf28d\") " Nov 25 09:56:33 crc kubenswrapper[4769]: I1125 09:56:33.652935 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/029b4e7c-2a08-4a65-8226-904ac6eb536e-util\") pod \"029b4e7c-2a08-4a65-8226-904ac6eb536e\" (UID: \"029b4e7c-2a08-4a65-8226-904ac6eb536e\") " Nov 25 09:56:33 crc kubenswrapper[4769]: I1125 09:56:33.653046 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mkjcx\" (UniqueName: \"kubernetes.io/projected/029b4e7c-2a08-4a65-8226-904ac6eb536e-kube-api-access-mkjcx\") pod \"029b4e7c-2a08-4a65-8226-904ac6eb536e\" (UID: \"029b4e7c-2a08-4a65-8226-904ac6eb536e\") " Nov 25 09:56:33 crc kubenswrapper[4769]: I1125 09:56:33.653151 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0d4ddb62-f09e-495e-bad9-dc38999cf28d-util\") pod \"0d4ddb62-f09e-495e-bad9-dc38999cf28d\" (UID: \"0d4ddb62-f09e-495e-bad9-dc38999cf28d\") " Nov 25 09:56:33 crc kubenswrapper[4769]: I1125 09:56:33.654446 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/029b4e7c-2a08-4a65-8226-904ac6eb536e-bundle" (OuterVolumeSpecName: "bundle") pod "029b4e7c-2a08-4a65-8226-904ac6eb536e" (UID: "029b4e7c-2a08-4a65-8226-904ac6eb536e"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:56:33 crc kubenswrapper[4769]: I1125 09:56:33.659743 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0d4ddb62-f09e-495e-bad9-dc38999cf28d-bundle" (OuterVolumeSpecName: "bundle") pod "0d4ddb62-f09e-495e-bad9-dc38999cf28d" (UID: "0d4ddb62-f09e-495e-bad9-dc38999cf28d"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:56:33 crc kubenswrapper[4769]: I1125 09:56:33.663166 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/029b4e7c-2a08-4a65-8226-904ac6eb536e-kube-api-access-mkjcx" (OuterVolumeSpecName: "kube-api-access-mkjcx") pod "029b4e7c-2a08-4a65-8226-904ac6eb536e" (UID: "029b4e7c-2a08-4a65-8226-904ac6eb536e"). InnerVolumeSpecName "kube-api-access-mkjcx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:56:33 crc kubenswrapper[4769]: I1125 09:56:33.668844 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/029b4e7c-2a08-4a65-8226-904ac6eb536e-util" (OuterVolumeSpecName: "util") pod "029b4e7c-2a08-4a65-8226-904ac6eb536e" (UID: "029b4e7c-2a08-4a65-8226-904ac6eb536e"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:56:33 crc kubenswrapper[4769]: I1125 09:56:33.670268 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d4ddb62-f09e-495e-bad9-dc38999cf28d-kube-api-access-26mlm" (OuterVolumeSpecName: "kube-api-access-26mlm") pod "0d4ddb62-f09e-495e-bad9-dc38999cf28d" (UID: "0d4ddb62-f09e-495e-bad9-dc38999cf28d"). InnerVolumeSpecName "kube-api-access-26mlm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:56:33 crc kubenswrapper[4769]: I1125 09:56:33.675325 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0d4ddb62-f09e-495e-bad9-dc38999cf28d-util" (OuterVolumeSpecName: "util") pod "0d4ddb62-f09e-495e-bad9-dc38999cf28d" (UID: "0d4ddb62-f09e-495e-bad9-dc38999cf28d"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:56:33 crc kubenswrapper[4769]: I1125 09:56:33.755556 4769 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0d4ddb62-f09e-495e-bad9-dc38999cf28d-util\") on node \"crc\" DevicePath \"\"" Nov 25 09:56:33 crc kubenswrapper[4769]: I1125 09:56:33.755621 4769 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/029b4e7c-2a08-4a65-8226-904ac6eb536e-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:56:33 crc kubenswrapper[4769]: I1125 09:56:33.755641 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-26mlm\" (UniqueName: \"kubernetes.io/projected/0d4ddb62-f09e-495e-bad9-dc38999cf28d-kube-api-access-26mlm\") on node \"crc\" DevicePath \"\"" Nov 25 09:56:33 crc kubenswrapper[4769]: I1125 09:56:33.755663 4769 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0d4ddb62-f09e-495e-bad9-dc38999cf28d-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:56:33 crc kubenswrapper[4769]: I1125 09:56:33.755679 4769 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/029b4e7c-2a08-4a65-8226-904ac6eb536e-util\") on node \"crc\" DevicePath \"\"" Nov 25 09:56:33 crc kubenswrapper[4769]: I1125 09:56:33.755696 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mkjcx\" (UniqueName: \"kubernetes.io/projected/029b4e7c-2a08-4a65-8226-904ac6eb536e-kube-api-access-mkjcx\") on node \"crc\" DevicePath \"\"" Nov 25 09:56:34 crc kubenswrapper[4769]: I1125 09:56:34.213195 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb826l85" Nov 25 09:56:34 crc kubenswrapper[4769]: I1125 09:56:34.213395 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb826l85" event={"ID":"029b4e7c-2a08-4a65-8226-904ac6eb536e","Type":"ContainerDied","Data":"103c29843562b64a4480c7f23c67b87f5b4f9efbeaaba04dce87fd86c0b11d55"} Nov 25 09:56:34 crc kubenswrapper[4769]: I1125 09:56:34.213451 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="103c29843562b64a4480c7f23c67b87f5b4f9efbeaaba04dce87fd86c0b11d55" Nov 25 09:56:34 crc kubenswrapper[4769]: I1125 09:56:34.215664 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fcfrjr" event={"ID":"0d4ddb62-f09e-495e-bad9-dc38999cf28d","Type":"ContainerDied","Data":"6ef6acb6284f18cda1f9cef416d4760b97b78fc5a6c8ee07046059d6027dae15"} Nov 25 09:56:34 crc kubenswrapper[4769]: I1125 09:56:34.215693 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6ef6acb6284f18cda1f9cef416d4760b97b78fc5a6c8ee07046059d6027dae15" Nov 25 09:56:34 crc kubenswrapper[4769]: I1125 09:56:34.215877 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fcfrjr" Nov 25 09:56:44 crc kubenswrapper[4769]: I1125 09:56:44.293751 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-cbd48d4d7-8psr6"] Nov 25 09:56:44 crc kubenswrapper[4769]: E1125 09:56:44.294938 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="029b4e7c-2a08-4a65-8226-904ac6eb536e" containerName="pull" Nov 25 09:56:44 crc kubenswrapper[4769]: I1125 09:56:44.294975 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="029b4e7c-2a08-4a65-8226-904ac6eb536e" containerName="pull" Nov 25 09:56:44 crc kubenswrapper[4769]: E1125 09:56:44.295010 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="029b4e7c-2a08-4a65-8226-904ac6eb536e" containerName="extract" Nov 25 09:56:44 crc kubenswrapper[4769]: I1125 09:56:44.295024 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="029b4e7c-2a08-4a65-8226-904ac6eb536e" containerName="extract" Nov 25 09:56:44 crc kubenswrapper[4769]: E1125 09:56:44.295042 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d4ddb62-f09e-495e-bad9-dc38999cf28d" containerName="pull" Nov 25 09:56:44 crc kubenswrapper[4769]: I1125 09:56:44.295052 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d4ddb62-f09e-495e-bad9-dc38999cf28d" containerName="pull" Nov 25 09:56:44 crc kubenswrapper[4769]: E1125 09:56:44.295074 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d4ddb62-f09e-495e-bad9-dc38999cf28d" containerName="util" Nov 25 09:56:44 crc kubenswrapper[4769]: I1125 09:56:44.295082 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d4ddb62-f09e-495e-bad9-dc38999cf28d" containerName="util" Nov 25 09:56:44 crc kubenswrapper[4769]: E1125 09:56:44.295094 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d4ddb62-f09e-495e-bad9-dc38999cf28d" containerName="extract" Nov 25 09:56:44 crc kubenswrapper[4769]: I1125 09:56:44.295101 4769 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="0d4ddb62-f09e-495e-bad9-dc38999cf28d" containerName="extract" Nov 25 09:56:44 crc kubenswrapper[4769]: E1125 09:56:44.295122 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="029b4e7c-2a08-4a65-8226-904ac6eb536e" containerName="util" Nov 25 09:56:44 crc kubenswrapper[4769]: I1125 09:56:44.295131 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="029b4e7c-2a08-4a65-8226-904ac6eb536e" containerName="util" Nov 25 09:56:44 crc kubenswrapper[4769]: I1125 09:56:44.295281 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="029b4e7c-2a08-4a65-8226-904ac6eb536e" containerName="extract" Nov 25 09:56:44 crc kubenswrapper[4769]: I1125 09:56:44.295299 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d4ddb62-f09e-495e-bad9-dc38999cf28d" containerName="extract" Nov 25 09:56:44 crc kubenswrapper[4769]: I1125 09:56:44.296300 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators-redhat/loki-operator-controller-manager-cbd48d4d7-8psr6" Nov 25 09:56:44 crc kubenswrapper[4769]: I1125 09:56:44.299530 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-metrics" Nov 25 09:56:44 crc kubenswrapper[4769]: I1125 09:56:44.299649 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"loki-operator-manager-config" Nov 25 09:56:44 crc kubenswrapper[4769]: I1125 09:56:44.299691 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-controller-manager-service-cert" Nov 25 09:56:44 crc kubenswrapper[4769]: I1125 09:56:44.299745 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"openshift-service-ca.crt" Nov 25 09:56:44 crc kubenswrapper[4769]: I1125 09:56:44.299757 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"kube-root-ca.crt" Nov 25 09:56:44 crc kubenswrapper[4769]: I1125 09:56:44.299759 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-controller-manager-dockercfg-wdv86" Nov 25 09:56:44 crc kubenswrapper[4769]: I1125 09:56:44.320789 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-cbd48d4d7-8psr6"] Nov 25 09:56:44 crc kubenswrapper[4769]: I1125 09:56:44.339505 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/930d9174-b8de-465e-a1b7-b9aa7c498246-manager-config\") pod \"loki-operator-controller-manager-cbd48d4d7-8psr6\" (UID: \"930d9174-b8de-465e-a1b7-b9aa7c498246\") " pod="openshift-operators-redhat/loki-operator-controller-manager-cbd48d4d7-8psr6" Nov 25 09:56:44 crc kubenswrapper[4769]: I1125 09:56:44.339593 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/930d9174-b8de-465e-a1b7-b9aa7c498246-webhook-cert\") pod \"loki-operator-controller-manager-cbd48d4d7-8psr6\" (UID: \"930d9174-b8de-465e-a1b7-b9aa7c498246\") " pod="openshift-operators-redhat/loki-operator-controller-manager-cbd48d4d7-8psr6" Nov 25 09:56:44 crc kubenswrapper[4769]: I1125 09:56:44.339624 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pbpcw\" (UniqueName: 
\"kubernetes.io/projected/930d9174-b8de-465e-a1b7-b9aa7c498246-kube-api-access-pbpcw\") pod \"loki-operator-controller-manager-cbd48d4d7-8psr6\" (UID: \"930d9174-b8de-465e-a1b7-b9aa7c498246\") " pod="openshift-operators-redhat/loki-operator-controller-manager-cbd48d4d7-8psr6" Nov 25 09:56:44 crc kubenswrapper[4769]: I1125 09:56:44.339697 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"loki-operator-metrics-cert\" (UniqueName: \"kubernetes.io/secret/930d9174-b8de-465e-a1b7-b9aa7c498246-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-cbd48d4d7-8psr6\" (UID: \"930d9174-b8de-465e-a1b7-b9aa7c498246\") " pod="openshift-operators-redhat/loki-operator-controller-manager-cbd48d4d7-8psr6" Nov 25 09:56:44 crc kubenswrapper[4769]: I1125 09:56:44.339718 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/930d9174-b8de-465e-a1b7-b9aa7c498246-apiservice-cert\") pod \"loki-operator-controller-manager-cbd48d4d7-8psr6\" (UID: \"930d9174-b8de-465e-a1b7-b9aa7c498246\") " pod="openshift-operators-redhat/loki-operator-controller-manager-cbd48d4d7-8psr6" Nov 25 09:56:44 crc kubenswrapper[4769]: I1125 09:56:44.440988 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/930d9174-b8de-465e-a1b7-b9aa7c498246-webhook-cert\") pod \"loki-operator-controller-manager-cbd48d4d7-8psr6\" (UID: \"930d9174-b8de-465e-a1b7-b9aa7c498246\") " pod="openshift-operators-redhat/loki-operator-controller-manager-cbd48d4d7-8psr6" Nov 25 09:56:44 crc kubenswrapper[4769]: I1125 09:56:44.441059 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pbpcw\" (UniqueName: \"kubernetes.io/projected/930d9174-b8de-465e-a1b7-b9aa7c498246-kube-api-access-pbpcw\") pod \"loki-operator-controller-manager-cbd48d4d7-8psr6\" (UID: \"930d9174-b8de-465e-a1b7-b9aa7c498246\") " pod="openshift-operators-redhat/loki-operator-controller-manager-cbd48d4d7-8psr6" Nov 25 09:56:44 crc kubenswrapper[4769]: I1125 09:56:44.441109 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"loki-operator-metrics-cert\" (UniqueName: \"kubernetes.io/secret/930d9174-b8de-465e-a1b7-b9aa7c498246-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-cbd48d4d7-8psr6\" (UID: \"930d9174-b8de-465e-a1b7-b9aa7c498246\") " pod="openshift-operators-redhat/loki-operator-controller-manager-cbd48d4d7-8psr6" Nov 25 09:56:44 crc kubenswrapper[4769]: I1125 09:56:44.441133 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/930d9174-b8de-465e-a1b7-b9aa7c498246-apiservice-cert\") pod \"loki-operator-controller-manager-cbd48d4d7-8psr6\" (UID: \"930d9174-b8de-465e-a1b7-b9aa7c498246\") " pod="openshift-operators-redhat/loki-operator-controller-manager-cbd48d4d7-8psr6" Nov 25 09:56:44 crc kubenswrapper[4769]: I1125 09:56:44.441176 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/930d9174-b8de-465e-a1b7-b9aa7c498246-manager-config\") pod \"loki-operator-controller-manager-cbd48d4d7-8psr6\" (UID: \"930d9174-b8de-465e-a1b7-b9aa7c498246\") " pod="openshift-operators-redhat/loki-operator-controller-manager-cbd48d4d7-8psr6" Nov 25 09:56:44 crc kubenswrapper[4769]: I1125 09:56:44.442133 4769 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/930d9174-b8de-465e-a1b7-b9aa7c498246-manager-config\") pod \"loki-operator-controller-manager-cbd48d4d7-8psr6\" (UID: \"930d9174-b8de-465e-a1b7-b9aa7c498246\") " pod="openshift-operators-redhat/loki-operator-controller-manager-cbd48d4d7-8psr6" Nov 25 09:56:44 crc kubenswrapper[4769]: I1125 09:56:44.448675 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/930d9174-b8de-465e-a1b7-b9aa7c498246-apiservice-cert\") pod \"loki-operator-controller-manager-cbd48d4d7-8psr6\" (UID: \"930d9174-b8de-465e-a1b7-b9aa7c498246\") " pod="openshift-operators-redhat/loki-operator-controller-manager-cbd48d4d7-8psr6" Nov 25 09:56:44 crc kubenswrapper[4769]: I1125 09:56:44.456719 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/930d9174-b8de-465e-a1b7-b9aa7c498246-webhook-cert\") pod \"loki-operator-controller-manager-cbd48d4d7-8psr6\" (UID: \"930d9174-b8de-465e-a1b7-b9aa7c498246\") " pod="openshift-operators-redhat/loki-operator-controller-manager-cbd48d4d7-8psr6" Nov 25 09:56:44 crc kubenswrapper[4769]: I1125 09:56:44.461991 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pbpcw\" (UniqueName: \"kubernetes.io/projected/930d9174-b8de-465e-a1b7-b9aa7c498246-kube-api-access-pbpcw\") pod \"loki-operator-controller-manager-cbd48d4d7-8psr6\" (UID: \"930d9174-b8de-465e-a1b7-b9aa7c498246\") " pod="openshift-operators-redhat/loki-operator-controller-manager-cbd48d4d7-8psr6" Nov 25 09:56:44 crc kubenswrapper[4769]: I1125 09:56:44.468781 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"loki-operator-metrics-cert\" (UniqueName: \"kubernetes.io/secret/930d9174-b8de-465e-a1b7-b9aa7c498246-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-cbd48d4d7-8psr6\" (UID: \"930d9174-b8de-465e-a1b7-b9aa7c498246\") " pod="openshift-operators-redhat/loki-operator-controller-manager-cbd48d4d7-8psr6" Nov 25 09:56:44 crc kubenswrapper[4769]: I1125 09:56:44.620660 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators-redhat/loki-operator-controller-manager-cbd48d4d7-8psr6" Nov 25 09:56:44 crc kubenswrapper[4769]: I1125 09:56:44.895126 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-cbd48d4d7-8psr6"] Nov 25 09:56:45 crc kubenswrapper[4769]: I1125 09:56:45.323109 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-cbd48d4d7-8psr6" event={"ID":"930d9174-b8de-465e-a1b7-b9aa7c498246","Type":"ContainerStarted","Data":"0e2b73e6f543047579d6a0cdd787a4db0baeed42d0508a58bb5b9bcaea8b733e"} Nov 25 09:56:48 crc kubenswrapper[4769]: I1125 09:56:48.510443 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/cluster-logging-operator-ff9846bd-84wrq"] Nov 25 09:56:48 crc kubenswrapper[4769]: I1125 09:56:48.512455 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/cluster-logging-operator-ff9846bd-84wrq" Nov 25 09:56:48 crc kubenswrapper[4769]: I1125 09:56:48.515077 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"kube-root-ca.crt" Nov 25 09:56:48 crc kubenswrapper[4769]: I1125 09:56:48.515858 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"cluster-logging-operator-dockercfg-qzz5h" Nov 25 09:56:48 crc kubenswrapper[4769]: I1125 09:56:48.517663 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"openshift-service-ca.crt" Nov 25 09:56:48 crc kubenswrapper[4769]: I1125 09:56:48.537313 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/cluster-logging-operator-ff9846bd-84wrq"] Nov 25 09:56:48 crc kubenswrapper[4769]: I1125 09:56:48.627799 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-24qlx\" (UniqueName: \"kubernetes.io/projected/fd5f2609-5b84-4be6-9c54-725cd7879630-kube-api-access-24qlx\") pod \"cluster-logging-operator-ff9846bd-84wrq\" (UID: \"fd5f2609-5b84-4be6-9c54-725cd7879630\") " pod="openshift-logging/cluster-logging-operator-ff9846bd-84wrq" Nov 25 09:56:48 crc kubenswrapper[4769]: I1125 09:56:48.728909 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-24qlx\" (UniqueName: \"kubernetes.io/projected/fd5f2609-5b84-4be6-9c54-725cd7879630-kube-api-access-24qlx\") pod \"cluster-logging-operator-ff9846bd-84wrq\" (UID: \"fd5f2609-5b84-4be6-9c54-725cd7879630\") " pod="openshift-logging/cluster-logging-operator-ff9846bd-84wrq" Nov 25 09:56:48 crc kubenswrapper[4769]: I1125 09:56:48.757235 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-24qlx\" (UniqueName: \"kubernetes.io/projected/fd5f2609-5b84-4be6-9c54-725cd7879630-kube-api-access-24qlx\") pod \"cluster-logging-operator-ff9846bd-84wrq\" (UID: \"fd5f2609-5b84-4be6-9c54-725cd7879630\") " pod="openshift-logging/cluster-logging-operator-ff9846bd-84wrq" Nov 25 09:56:48 crc kubenswrapper[4769]: I1125 09:56:48.838014 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/cluster-logging-operator-ff9846bd-84wrq" Nov 25 09:56:49 crc kubenswrapper[4769]: I1125 09:56:49.218173 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/cluster-logging-operator-ff9846bd-84wrq"] Nov 25 09:56:51 crc kubenswrapper[4769]: I1125 09:56:51.447652 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-zjqhj"] Nov 25 09:56:51 crc kubenswrapper[4769]: I1125 09:56:51.448335 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-zjqhj" podUID="446806b9-7941-43d7-885c-61c1d577811d" containerName="controller-manager" containerID="cri-o://b29ee58bf1fb60c52b2edbfbda8e315e56d6f626600bccfe4f17d018dcbe8603" gracePeriod=30 Nov 25 09:56:51 crc kubenswrapper[4769]: I1125 09:56:51.527529 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-cv8sw"] Nov 25 09:56:51 crc kubenswrapper[4769]: I1125 09:56:51.527862 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-cv8sw" podUID="162f7ecc-6c53-4f08-955e-3ad0e5a7186e" containerName="route-controller-manager" containerID="cri-o://75e9071f4def69a4a97dba4fff22992c688707c8e7b0ef6d0474b82e2ecea9ce" gracePeriod=30 Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.026237 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-cv8sw" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.111296 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/162f7ecc-6c53-4f08-955e-3ad0e5a7186e-config\") pod \"162f7ecc-6c53-4f08-955e-3ad0e5a7186e\" (UID: \"162f7ecc-6c53-4f08-955e-3ad0e5a7186e\") " Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.111775 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/162f7ecc-6c53-4f08-955e-3ad0e5a7186e-serving-cert\") pod \"162f7ecc-6c53-4f08-955e-3ad0e5a7186e\" (UID: \"162f7ecc-6c53-4f08-955e-3ad0e5a7186e\") " Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.111834 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n4ddz\" (UniqueName: \"kubernetes.io/projected/162f7ecc-6c53-4f08-955e-3ad0e5a7186e-kube-api-access-n4ddz\") pod \"162f7ecc-6c53-4f08-955e-3ad0e5a7186e\" (UID: \"162f7ecc-6c53-4f08-955e-3ad0e5a7186e\") " Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.111991 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/162f7ecc-6c53-4f08-955e-3ad0e5a7186e-client-ca\") pod \"162f7ecc-6c53-4f08-955e-3ad0e5a7186e\" (UID: \"162f7ecc-6c53-4f08-955e-3ad0e5a7186e\") " Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.112587 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/162f7ecc-6c53-4f08-955e-3ad0e5a7186e-config" (OuterVolumeSpecName: "config") pod "162f7ecc-6c53-4f08-955e-3ad0e5a7186e" (UID: "162f7ecc-6c53-4f08-955e-3ad0e5a7186e"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.113802 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/162f7ecc-6c53-4f08-955e-3ad0e5a7186e-client-ca" (OuterVolumeSpecName: "client-ca") pod "162f7ecc-6c53-4f08-955e-3ad0e5a7186e" (UID: "162f7ecc-6c53-4f08-955e-3ad0e5a7186e"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.118812 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/162f7ecc-6c53-4f08-955e-3ad0e5a7186e-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "162f7ecc-6c53-4f08-955e-3ad0e5a7186e" (UID: "162f7ecc-6c53-4f08-955e-3ad0e5a7186e"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.119609 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/162f7ecc-6c53-4f08-955e-3ad0e5a7186e-kube-api-access-n4ddz" (OuterVolumeSpecName: "kube-api-access-n4ddz") pod "162f7ecc-6c53-4f08-955e-3ad0e5a7186e" (UID: "162f7ecc-6c53-4f08-955e-3ad0e5a7186e"). InnerVolumeSpecName "kube-api-access-n4ddz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.191774 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-zjqhj" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.214128 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/162f7ecc-6c53-4f08-955e-3ad0e5a7186e-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.214189 4769 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/162f7ecc-6c53-4f08-955e-3ad0e5a7186e-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.214207 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n4ddz\" (UniqueName: \"kubernetes.io/projected/162f7ecc-6c53-4f08-955e-3ad0e5a7186e-kube-api-access-n4ddz\") on node \"crc\" DevicePath \"\"" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.214264 4769 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/162f7ecc-6c53-4f08-955e-3ad0e5a7186e-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.291581 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.291681 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.317288 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"kube-api-access-mg58z\" (UniqueName: \"kubernetes.io/projected/446806b9-7941-43d7-885c-61c1d577811d-kube-api-access-mg58z\") pod \"446806b9-7941-43d7-885c-61c1d577811d\" (UID: \"446806b9-7941-43d7-885c-61c1d577811d\") " Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.317364 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/446806b9-7941-43d7-885c-61c1d577811d-serving-cert\") pod \"446806b9-7941-43d7-885c-61c1d577811d\" (UID: \"446806b9-7941-43d7-885c-61c1d577811d\") " Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.317441 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/446806b9-7941-43d7-885c-61c1d577811d-config\") pod \"446806b9-7941-43d7-885c-61c1d577811d\" (UID: \"446806b9-7941-43d7-885c-61c1d577811d\") " Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.317557 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/446806b9-7941-43d7-885c-61c1d577811d-proxy-ca-bundles\") pod \"446806b9-7941-43d7-885c-61c1d577811d\" (UID: \"446806b9-7941-43d7-885c-61c1d577811d\") " Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.317578 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/446806b9-7941-43d7-885c-61c1d577811d-client-ca\") pod \"446806b9-7941-43d7-885c-61c1d577811d\" (UID: \"446806b9-7941-43d7-885c-61c1d577811d\") " Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.318840 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/446806b9-7941-43d7-885c-61c1d577811d-client-ca" (OuterVolumeSpecName: "client-ca") pod "446806b9-7941-43d7-885c-61c1d577811d" (UID: "446806b9-7941-43d7-885c-61c1d577811d"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.320636 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/446806b9-7941-43d7-885c-61c1d577811d-config" (OuterVolumeSpecName: "config") pod "446806b9-7941-43d7-885c-61c1d577811d" (UID: "446806b9-7941-43d7-885c-61c1d577811d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.322566 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/446806b9-7941-43d7-885c-61c1d577811d-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "446806b9-7941-43d7-885c-61c1d577811d" (UID: "446806b9-7941-43d7-885c-61c1d577811d"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.324600 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/446806b9-7941-43d7-885c-61c1d577811d-kube-api-access-mg58z" (OuterVolumeSpecName: "kube-api-access-mg58z") pod "446806b9-7941-43d7-885c-61c1d577811d" (UID: "446806b9-7941-43d7-885c-61c1d577811d"). InnerVolumeSpecName "kube-api-access-mg58z". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.346204 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/446806b9-7941-43d7-885c-61c1d577811d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "446806b9-7941-43d7-885c-61c1d577811d" (UID: "446806b9-7941-43d7-885c-61c1d577811d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.389616 4769 generic.go:334] "Generic (PLEG): container finished" podID="446806b9-7941-43d7-885c-61c1d577811d" containerID="b29ee58bf1fb60c52b2edbfbda8e315e56d6f626600bccfe4f17d018dcbe8603" exitCode=0 Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.389715 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-zjqhj" event={"ID":"446806b9-7941-43d7-885c-61c1d577811d","Type":"ContainerDied","Data":"b29ee58bf1fb60c52b2edbfbda8e315e56d6f626600bccfe4f17d018dcbe8603"} Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.389751 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-zjqhj" event={"ID":"446806b9-7941-43d7-885c-61c1d577811d","Type":"ContainerDied","Data":"ac429310bee3eec5cf21a61ea1b7416c985b29d6fa94570cf51edbd939ff3257"} Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.389771 4769 scope.go:117] "RemoveContainer" containerID="b29ee58bf1fb60c52b2edbfbda8e315e56d6f626600bccfe4f17d018dcbe8603" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.389909 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-zjqhj" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.395379 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-cbd48d4d7-8psr6" event={"ID":"930d9174-b8de-465e-a1b7-b9aa7c498246","Type":"ContainerStarted","Data":"20c95cf0fe1932f6b7e146e8e94f3355cf90208fb5c66c0ee3a7651b76a7628d"} Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.399199 4769 generic.go:334] "Generic (PLEG): container finished" podID="162f7ecc-6c53-4f08-955e-3ad0e5a7186e" containerID="75e9071f4def69a4a97dba4fff22992c688707c8e7b0ef6d0474b82e2ecea9ce" exitCode=0 Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.399342 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-cv8sw" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.400156 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-cv8sw" event={"ID":"162f7ecc-6c53-4f08-955e-3ad0e5a7186e","Type":"ContainerDied","Data":"75e9071f4def69a4a97dba4fff22992c688707c8e7b0ef6d0474b82e2ecea9ce"} Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.400187 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-cv8sw" event={"ID":"162f7ecc-6c53-4f08-955e-3ad0e5a7186e","Type":"ContainerDied","Data":"aac22c91662c7da6d2302a4f752c7333a805bd4ef7791ad2d39e16f5115a95da"} Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.402437 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/cluster-logging-operator-ff9846bd-84wrq" event={"ID":"fd5f2609-5b84-4be6-9c54-725cd7879630","Type":"ContainerStarted","Data":"5a434b02db1d7cb3dfe073e77f3ac758406ebe44034bd0b587cd54a32970c309"} Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.410579 4769 scope.go:117] "RemoveContainer" containerID="b29ee58bf1fb60c52b2edbfbda8e315e56d6f626600bccfe4f17d018dcbe8603" Nov 25 09:56:52 crc kubenswrapper[4769]: E1125 09:56:52.410953 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b29ee58bf1fb60c52b2edbfbda8e315e56d6f626600bccfe4f17d018dcbe8603\": container with ID starting with b29ee58bf1fb60c52b2edbfbda8e315e56d6f626600bccfe4f17d018dcbe8603 not found: ID does not exist" containerID="b29ee58bf1fb60c52b2edbfbda8e315e56d6f626600bccfe4f17d018dcbe8603" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.410998 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b29ee58bf1fb60c52b2edbfbda8e315e56d6f626600bccfe4f17d018dcbe8603"} err="failed to get container status \"b29ee58bf1fb60c52b2edbfbda8e315e56d6f626600bccfe4f17d018dcbe8603\": rpc error: code = NotFound desc = could not find container \"b29ee58bf1fb60c52b2edbfbda8e315e56d6f626600bccfe4f17d018dcbe8603\": container with ID starting with b29ee58bf1fb60c52b2edbfbda8e315e56d6f626600bccfe4f17d018dcbe8603 not found: ID does not exist" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.411021 4769 scope.go:117] "RemoveContainer" containerID="75e9071f4def69a4a97dba4fff22992c688707c8e7b0ef6d0474b82e2ecea9ce" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.422006 4769 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/446806b9-7941-43d7-885c-61c1d577811d-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.422042 4769 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/446806b9-7941-43d7-885c-61c1d577811d-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.422052 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg58z\" (UniqueName: \"kubernetes.io/projected/446806b9-7941-43d7-885c-61c1d577811d-kube-api-access-mg58z\") on node \"crc\" DevicePath \"\"" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.422064 4769 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/446806b9-7941-43d7-885c-61c1d577811d-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.422073 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/446806b9-7941-43d7-885c-61c1d577811d-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.437737 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-zjqhj"] Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.439932 4769 scope.go:117] "RemoveContainer" containerID="75e9071f4def69a4a97dba4fff22992c688707c8e7b0ef6d0474b82e2ecea9ce" Nov 25 09:56:52 crc kubenswrapper[4769]: E1125 09:56:52.442677 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"75e9071f4def69a4a97dba4fff22992c688707c8e7b0ef6d0474b82e2ecea9ce\": container with ID starting with 75e9071f4def69a4a97dba4fff22992c688707c8e7b0ef6d0474b82e2ecea9ce not found: ID does not exist" containerID="75e9071f4def69a4a97dba4fff22992c688707c8e7b0ef6d0474b82e2ecea9ce" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.442734 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"75e9071f4def69a4a97dba4fff22992c688707c8e7b0ef6d0474b82e2ecea9ce"} err="failed to get container status \"75e9071f4def69a4a97dba4fff22992c688707c8e7b0ef6d0474b82e2ecea9ce\": rpc error: code = NotFound desc = could not find container \"75e9071f4def69a4a97dba4fff22992c688707c8e7b0ef6d0474b82e2ecea9ce\": container with ID starting with 75e9071f4def69a4a97dba4fff22992c688707c8e7b0ef6d0474b82e2ecea9ce not found: ID does not exist" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.447452 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-zjqhj"] Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.464794 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-cv8sw"] Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.467661 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-cv8sw"] Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.721463 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-789fddbbc-9kdv5"] Nov 25 09:56:52 crc kubenswrapper[4769]: E1125 09:56:52.721827 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="446806b9-7941-43d7-885c-61c1d577811d" containerName="controller-manager" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.721851 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="446806b9-7941-43d7-885c-61c1d577811d" containerName="controller-manager" Nov 25 09:56:52 crc kubenswrapper[4769]: E1125 09:56:52.721863 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="162f7ecc-6c53-4f08-955e-3ad0e5a7186e" containerName="route-controller-manager" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.721869 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="162f7ecc-6c53-4f08-955e-3ad0e5a7186e" containerName="route-controller-manager" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.722031 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="162f7ecc-6c53-4f08-955e-3ad0e5a7186e" 
containerName="route-controller-manager" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.722045 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="446806b9-7941-43d7-885c-61c1d577811d" containerName="controller-manager" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.722610 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-789fddbbc-9kdv5" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.728209 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.728444 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.729175 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.729222 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.729490 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.729742 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.737334 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-789fddbbc-9kdv5"] Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.743407 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.786837 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7688fc9c4b-zpj9w"] Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.789683 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7688fc9c4b-zpj9w" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.792405 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.792776 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.793267 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.793286 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.793336 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.793510 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7688fc9c4b-zpj9w"] Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.795117 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.829834 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b9be60c1-31ae-434d-9a32-656d44cc40db-client-ca\") pod \"controller-manager-789fddbbc-9kdv5\" (UID: \"b9be60c1-31ae-434d-9a32-656d44cc40db\") " pod="openshift-controller-manager/controller-manager-789fddbbc-9kdv5" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.829891 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b9be60c1-31ae-434d-9a32-656d44cc40db-serving-cert\") pod \"controller-manager-789fddbbc-9kdv5\" (UID: \"b9be60c1-31ae-434d-9a32-656d44cc40db\") " pod="openshift-controller-manager/controller-manager-789fddbbc-9kdv5" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.829926 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b9be60c1-31ae-434d-9a32-656d44cc40db-config\") pod \"controller-manager-789fddbbc-9kdv5\" (UID: \"b9be60c1-31ae-434d-9a32-656d44cc40db\") " pod="openshift-controller-manager/controller-manager-789fddbbc-9kdv5" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.830040 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/b9be60c1-31ae-434d-9a32-656d44cc40db-proxy-ca-bundles\") pod \"controller-manager-789fddbbc-9kdv5\" (UID: \"b9be60c1-31ae-434d-9a32-656d44cc40db\") " pod="openshift-controller-manager/controller-manager-789fddbbc-9kdv5" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.830075 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p7rwm\" (UniqueName: \"kubernetes.io/projected/b9be60c1-31ae-434d-9a32-656d44cc40db-kube-api-access-p7rwm\") pod \"controller-manager-789fddbbc-9kdv5\" (UID: 
\"b9be60c1-31ae-434d-9a32-656d44cc40db\") " pod="openshift-controller-manager/controller-manager-789fddbbc-9kdv5" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.931499 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/b9be60c1-31ae-434d-9a32-656d44cc40db-proxy-ca-bundles\") pod \"controller-manager-789fddbbc-9kdv5\" (UID: \"b9be60c1-31ae-434d-9a32-656d44cc40db\") " pod="openshift-controller-manager/controller-manager-789fddbbc-9kdv5" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.931583 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p7rwm\" (UniqueName: \"kubernetes.io/projected/b9be60c1-31ae-434d-9a32-656d44cc40db-kube-api-access-p7rwm\") pod \"controller-manager-789fddbbc-9kdv5\" (UID: \"b9be60c1-31ae-434d-9a32-656d44cc40db\") " pod="openshift-controller-manager/controller-manager-789fddbbc-9kdv5" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.931646 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9bdb71f3-13ed-42c8-a349-8b5bbf4041d9-config\") pod \"route-controller-manager-7688fc9c4b-zpj9w\" (UID: \"9bdb71f3-13ed-42c8-a349-8b5bbf4041d9\") " pod="openshift-route-controller-manager/route-controller-manager-7688fc9c4b-zpj9w" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.931702 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qp4qb\" (UniqueName: \"kubernetes.io/projected/9bdb71f3-13ed-42c8-a349-8b5bbf4041d9-kube-api-access-qp4qb\") pod \"route-controller-manager-7688fc9c4b-zpj9w\" (UID: \"9bdb71f3-13ed-42c8-a349-8b5bbf4041d9\") " pod="openshift-route-controller-manager/route-controller-manager-7688fc9c4b-zpj9w" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.931747 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b9be60c1-31ae-434d-9a32-656d44cc40db-client-ca\") pod \"controller-manager-789fddbbc-9kdv5\" (UID: \"b9be60c1-31ae-434d-9a32-656d44cc40db\") " pod="openshift-controller-manager/controller-manager-789fddbbc-9kdv5" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.931779 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b9be60c1-31ae-434d-9a32-656d44cc40db-serving-cert\") pod \"controller-manager-789fddbbc-9kdv5\" (UID: \"b9be60c1-31ae-434d-9a32-656d44cc40db\") " pod="openshift-controller-manager/controller-manager-789fddbbc-9kdv5" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.931803 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9bdb71f3-13ed-42c8-a349-8b5bbf4041d9-serving-cert\") pod \"route-controller-manager-7688fc9c4b-zpj9w\" (UID: \"9bdb71f3-13ed-42c8-a349-8b5bbf4041d9\") " pod="openshift-route-controller-manager/route-controller-manager-7688fc9c4b-zpj9w" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.931837 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b9be60c1-31ae-434d-9a32-656d44cc40db-config\") pod \"controller-manager-789fddbbc-9kdv5\" (UID: \"b9be60c1-31ae-434d-9a32-656d44cc40db\") " 
pod="openshift-controller-manager/controller-manager-789fddbbc-9kdv5" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.931865 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9bdb71f3-13ed-42c8-a349-8b5bbf4041d9-client-ca\") pod \"route-controller-manager-7688fc9c4b-zpj9w\" (UID: \"9bdb71f3-13ed-42c8-a349-8b5bbf4041d9\") " pod="openshift-route-controller-manager/route-controller-manager-7688fc9c4b-zpj9w" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.932890 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/b9be60c1-31ae-434d-9a32-656d44cc40db-proxy-ca-bundles\") pod \"controller-manager-789fddbbc-9kdv5\" (UID: \"b9be60c1-31ae-434d-9a32-656d44cc40db\") " pod="openshift-controller-manager/controller-manager-789fddbbc-9kdv5" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.932900 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b9be60c1-31ae-434d-9a32-656d44cc40db-client-ca\") pod \"controller-manager-789fddbbc-9kdv5\" (UID: \"b9be60c1-31ae-434d-9a32-656d44cc40db\") " pod="openshift-controller-manager/controller-manager-789fddbbc-9kdv5" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.933948 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b9be60c1-31ae-434d-9a32-656d44cc40db-config\") pod \"controller-manager-789fddbbc-9kdv5\" (UID: \"b9be60c1-31ae-434d-9a32-656d44cc40db\") " pod="openshift-controller-manager/controller-manager-789fddbbc-9kdv5" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.940730 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b9be60c1-31ae-434d-9a32-656d44cc40db-serving-cert\") pod \"controller-manager-789fddbbc-9kdv5\" (UID: \"b9be60c1-31ae-434d-9a32-656d44cc40db\") " pod="openshift-controller-manager/controller-manager-789fddbbc-9kdv5" Nov 25 09:56:52 crc kubenswrapper[4769]: I1125 09:56:52.952454 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p7rwm\" (UniqueName: \"kubernetes.io/projected/b9be60c1-31ae-434d-9a32-656d44cc40db-kube-api-access-p7rwm\") pod \"controller-manager-789fddbbc-9kdv5\" (UID: \"b9be60c1-31ae-434d-9a32-656d44cc40db\") " pod="openshift-controller-manager/controller-manager-789fddbbc-9kdv5" Nov 25 09:56:53 crc kubenswrapper[4769]: I1125 09:56:53.033553 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qp4qb\" (UniqueName: \"kubernetes.io/projected/9bdb71f3-13ed-42c8-a349-8b5bbf4041d9-kube-api-access-qp4qb\") pod \"route-controller-manager-7688fc9c4b-zpj9w\" (UID: \"9bdb71f3-13ed-42c8-a349-8b5bbf4041d9\") " pod="openshift-route-controller-manager/route-controller-manager-7688fc9c4b-zpj9w" Nov 25 09:56:53 crc kubenswrapper[4769]: I1125 09:56:53.033664 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9bdb71f3-13ed-42c8-a349-8b5bbf4041d9-serving-cert\") pod \"route-controller-manager-7688fc9c4b-zpj9w\" (UID: \"9bdb71f3-13ed-42c8-a349-8b5bbf4041d9\") " pod="openshift-route-controller-manager/route-controller-manager-7688fc9c4b-zpj9w" Nov 25 09:56:53 crc kubenswrapper[4769]: I1125 09:56:53.033704 4769 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9bdb71f3-13ed-42c8-a349-8b5bbf4041d9-client-ca\") pod \"route-controller-manager-7688fc9c4b-zpj9w\" (UID: \"9bdb71f3-13ed-42c8-a349-8b5bbf4041d9\") " pod="openshift-route-controller-manager/route-controller-manager-7688fc9c4b-zpj9w" Nov 25 09:56:53 crc kubenswrapper[4769]: I1125 09:56:53.033780 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9bdb71f3-13ed-42c8-a349-8b5bbf4041d9-config\") pod \"route-controller-manager-7688fc9c4b-zpj9w\" (UID: \"9bdb71f3-13ed-42c8-a349-8b5bbf4041d9\") " pod="openshift-route-controller-manager/route-controller-manager-7688fc9c4b-zpj9w" Nov 25 09:56:53 crc kubenswrapper[4769]: I1125 09:56:53.035102 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9bdb71f3-13ed-42c8-a349-8b5bbf4041d9-client-ca\") pod \"route-controller-manager-7688fc9c4b-zpj9w\" (UID: \"9bdb71f3-13ed-42c8-a349-8b5bbf4041d9\") " pod="openshift-route-controller-manager/route-controller-manager-7688fc9c4b-zpj9w" Nov 25 09:56:53 crc kubenswrapper[4769]: I1125 09:56:53.035394 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9bdb71f3-13ed-42c8-a349-8b5bbf4041d9-config\") pod \"route-controller-manager-7688fc9c4b-zpj9w\" (UID: \"9bdb71f3-13ed-42c8-a349-8b5bbf4041d9\") " pod="openshift-route-controller-manager/route-controller-manager-7688fc9c4b-zpj9w" Nov 25 09:56:53 crc kubenswrapper[4769]: I1125 09:56:53.037407 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9bdb71f3-13ed-42c8-a349-8b5bbf4041d9-serving-cert\") pod \"route-controller-manager-7688fc9c4b-zpj9w\" (UID: \"9bdb71f3-13ed-42c8-a349-8b5bbf4041d9\") " pod="openshift-route-controller-manager/route-controller-manager-7688fc9c4b-zpj9w" Nov 25 09:56:53 crc kubenswrapper[4769]: I1125 09:56:53.046292 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-789fddbbc-9kdv5" Nov 25 09:56:53 crc kubenswrapper[4769]: I1125 09:56:53.059169 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qp4qb\" (UniqueName: \"kubernetes.io/projected/9bdb71f3-13ed-42c8-a349-8b5bbf4041d9-kube-api-access-qp4qb\") pod \"route-controller-manager-7688fc9c4b-zpj9w\" (UID: \"9bdb71f3-13ed-42c8-a349-8b5bbf4041d9\") " pod="openshift-route-controller-manager/route-controller-manager-7688fc9c4b-zpj9w" Nov 25 09:56:53 crc kubenswrapper[4769]: I1125 09:56:53.123516 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7688fc9c4b-zpj9w" Nov 25 09:56:53 crc kubenswrapper[4769]: I1125 09:56:53.526569 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-789fddbbc-9kdv5"] Nov 25 09:56:53 crc kubenswrapper[4769]: I1125 09:56:53.639940 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7688fc9c4b-zpj9w"] Nov 25 09:56:54 crc kubenswrapper[4769]: I1125 09:56:54.246847 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="162f7ecc-6c53-4f08-955e-3ad0e5a7186e" path="/var/lib/kubelet/pods/162f7ecc-6c53-4f08-955e-3ad0e5a7186e/volumes" Nov 25 09:56:54 crc kubenswrapper[4769]: I1125 09:56:54.248409 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="446806b9-7941-43d7-885c-61c1d577811d" path="/var/lib/kubelet/pods/446806b9-7941-43d7-885c-61c1d577811d/volumes" Nov 25 09:56:54 crc kubenswrapper[4769]: I1125 09:56:54.424514 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-789fddbbc-9kdv5" event={"ID":"b9be60c1-31ae-434d-9a32-656d44cc40db","Type":"ContainerStarted","Data":"74ac8c64de500e27c193e431871f6d3b4d54345b9e695a335ec02f71f9dccafd"} Nov 25 09:56:54 crc kubenswrapper[4769]: I1125 09:56:54.424573 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-789fddbbc-9kdv5" event={"ID":"b9be60c1-31ae-434d-9a32-656d44cc40db","Type":"ContainerStarted","Data":"53622eb9e4313949efa9cc4f658f76eaf37a1d58f7d3e2b99f49d081be80e8bd"} Nov 25 09:56:54 crc kubenswrapper[4769]: I1125 09:56:54.424860 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-789fddbbc-9kdv5" Nov 25 09:56:54 crc kubenswrapper[4769]: I1125 09:56:54.427460 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7688fc9c4b-zpj9w" event={"ID":"9bdb71f3-13ed-42c8-a349-8b5bbf4041d9","Type":"ContainerStarted","Data":"9b4e54b590bb36920dfdabc21103d47c988a4f22d9789d77fd1632bf975c01b5"} Nov 25 09:56:54 crc kubenswrapper[4769]: I1125 09:56:54.427505 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7688fc9c4b-zpj9w" event={"ID":"9bdb71f3-13ed-42c8-a349-8b5bbf4041d9","Type":"ContainerStarted","Data":"38531e70ef1c5f37faf5fc92aa7f98a1958d9e732b41c7d52dc031c4ee8e6d15"} Nov 25 09:56:54 crc kubenswrapper[4769]: I1125 09:56:54.427700 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-7688fc9c4b-zpj9w" Nov 25 09:56:54 crc kubenswrapper[4769]: I1125 09:56:54.434585 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-789fddbbc-9kdv5" Nov 25 09:56:54 crc kubenswrapper[4769]: I1125 09:56:54.435913 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-7688fc9c4b-zpj9w" Nov 25 09:56:54 crc kubenswrapper[4769]: I1125 09:56:54.458781 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-789fddbbc-9kdv5" podStartSLOduration=2.458760122 podStartE2EDuration="2.458760122s" podCreationTimestamp="2025-11-25 09:56:52 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:56:54.456128791 +0000 UTC m=+763.041101104" watchObservedRunningTime="2025-11-25 09:56:54.458760122 +0000 UTC m=+763.043732435" Nov 25 09:56:54 crc kubenswrapper[4769]: I1125 09:56:54.478657 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-7688fc9c4b-zpj9w" podStartSLOduration=2.478629753 podStartE2EDuration="2.478629753s" podCreationTimestamp="2025-11-25 09:56:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:56:54.476171766 +0000 UTC m=+763.061144079" watchObservedRunningTime="2025-11-25 09:56:54.478629753 +0000 UTC m=+763.063602066" Nov 25 09:56:55 crc kubenswrapper[4769]: I1125 09:56:55.722731 4769 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 25 09:57:06 crc kubenswrapper[4769]: I1125 09:57:06.530596 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-cbd48d4d7-8psr6" event={"ID":"930d9174-b8de-465e-a1b7-b9aa7c498246","Type":"ContainerStarted","Data":"ef6818f22ca030420db461ca27c6891f6871364cdd539f39d116df20baaa5462"} Nov 25 09:57:06 crc kubenswrapper[4769]: I1125 09:57:06.531554 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators-redhat/loki-operator-controller-manager-cbd48d4d7-8psr6" Nov 25 09:57:06 crc kubenswrapper[4769]: I1125 09:57:06.533226 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/cluster-logging-operator-ff9846bd-84wrq" event={"ID":"fd5f2609-5b84-4be6-9c54-725cd7879630","Type":"ContainerStarted","Data":"5743f903d928eb91c475e7b7d6453c6220352ee22ba31f0ecc2bd02a00507d80"} Nov 25 09:57:06 crc kubenswrapper[4769]: I1125 09:57:06.533355 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators-redhat/loki-operator-controller-manager-cbd48d4d7-8psr6" Nov 25 09:57:06 crc kubenswrapper[4769]: I1125 09:57:06.556775 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators-redhat/loki-operator-controller-manager-cbd48d4d7-8psr6" podStartSLOduration=1.484377711 podStartE2EDuration="22.556753467s" podCreationTimestamp="2025-11-25 09:56:44 +0000 UTC" firstStartedPulling="2025-11-25 09:56:44.908705912 +0000 UTC m=+753.493678225" lastFinishedPulling="2025-11-25 09:57:05.981081648 +0000 UTC m=+774.566053981" observedRunningTime="2025-11-25 09:57:06.550696852 +0000 UTC m=+775.135669205" watchObservedRunningTime="2025-11-25 09:57:06.556753467 +0000 UTC m=+775.141725770" Nov 25 09:57:06 crc kubenswrapper[4769]: I1125 09:57:06.598103 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/cluster-logging-operator-ff9846bd-84wrq" podStartSLOduration=4.398060852 podStartE2EDuration="18.598079841s" podCreationTimestamp="2025-11-25 09:56:48 +0000 UTC" firstStartedPulling="2025-11-25 09:56:51.775635042 +0000 UTC m=+760.360607355" lastFinishedPulling="2025-11-25 09:57:05.975654031 +0000 UTC m=+774.560626344" observedRunningTime="2025-11-25 09:57:06.59731163 +0000 UTC m=+775.182283983" watchObservedRunningTime="2025-11-25 09:57:06.598079841 +0000 UTC m=+775.183052154" Nov 25 09:57:10 crc kubenswrapper[4769]: I1125 09:57:10.858943 4769 
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["minio-dev/minio"] Nov 25 09:57:10 crc kubenswrapper[4769]: I1125 09:57:10.860408 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="minio-dev/minio" Nov 25 09:57:10 crc kubenswrapper[4769]: I1125 09:57:10.866631 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"minio-dev"/"kube-root-ca.crt" Nov 25 09:57:10 crc kubenswrapper[4769]: I1125 09:57:10.869262 4769 reflector.go:368] Caches populated for *v1.Secret from object-"minio-dev"/"default-dockercfg-vn64l" Nov 25 09:57:10 crc kubenswrapper[4769]: I1125 09:57:10.873128 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"minio-dev"/"openshift-service-ca.crt" Nov 25 09:57:10 crc kubenswrapper[4769]: I1125 09:57:10.875365 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["minio-dev/minio"] Nov 25 09:57:10 crc kubenswrapper[4769]: I1125 09:57:10.901334 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8nkxs\" (UniqueName: \"kubernetes.io/projected/1f16e4ac-5fb0-46bc-9d70-f148c65cb11b-kube-api-access-8nkxs\") pod \"minio\" (UID: \"1f16e4ac-5fb0-46bc-9d70-f148c65cb11b\") " pod="minio-dev/minio" Nov 25 09:57:10 crc kubenswrapper[4769]: I1125 09:57:10.901419 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-1378bc59-a238-4c5e-94f9-68a2f07056d5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1378bc59-a238-4c5e-94f9-68a2f07056d5\") pod \"minio\" (UID: \"1f16e4ac-5fb0-46bc-9d70-f148c65cb11b\") " pod="minio-dev/minio" Nov 25 09:57:11 crc kubenswrapper[4769]: I1125 09:57:11.003329 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8nkxs\" (UniqueName: \"kubernetes.io/projected/1f16e4ac-5fb0-46bc-9d70-f148c65cb11b-kube-api-access-8nkxs\") pod \"minio\" (UID: \"1f16e4ac-5fb0-46bc-9d70-f148c65cb11b\") " pod="minio-dev/minio" Nov 25 09:57:11 crc kubenswrapper[4769]: I1125 09:57:11.003424 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-1378bc59-a238-4c5e-94f9-68a2f07056d5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1378bc59-a238-4c5e-94f9-68a2f07056d5\") pod \"minio\" (UID: \"1f16e4ac-5fb0-46bc-9d70-f148c65cb11b\") " pod="minio-dev/minio" Nov 25 09:57:11 crc kubenswrapper[4769]: I1125 09:57:11.008482 4769 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 25 09:57:11 crc kubenswrapper[4769]: I1125 09:57:11.008546 4769 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-1378bc59-a238-4c5e-94f9-68a2f07056d5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1378bc59-a238-4c5e-94f9-68a2f07056d5\") pod \"minio\" (UID: \"1f16e4ac-5fb0-46bc-9d70-f148c65cb11b\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/305043c38c70d50d31dd44317aaa565b1dd9360f62b65920b0e94d382aef98f2/globalmount\"" pod="minio-dev/minio"
Nov 25 09:57:11 crc kubenswrapper[4769]: I1125 09:57:11.027777 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8nkxs\" (UniqueName: \"kubernetes.io/projected/1f16e4ac-5fb0-46bc-9d70-f148c65cb11b-kube-api-access-8nkxs\") pod \"minio\" (UID: \"1f16e4ac-5fb0-46bc-9d70-f148c65cb11b\") " pod="minio-dev/minio"
Nov 25 09:57:11 crc kubenswrapper[4769]: I1125 09:57:11.051333 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-1378bc59-a238-4c5e-94f9-68a2f07056d5\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1378bc59-a238-4c5e-94f9-68a2f07056d5\") pod \"minio\" (UID: \"1f16e4ac-5fb0-46bc-9d70-f148c65cb11b\") " pod="minio-dev/minio"
Nov 25 09:57:11 crc kubenswrapper[4769]: I1125 09:57:11.189624 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="minio-dev/minio"
Nov 25 09:57:11 crc kubenswrapper[4769]: I1125 09:57:11.846735 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["minio-dev/minio"]
Nov 25 09:57:11 crc kubenswrapper[4769]: W1125 09:57:11.863169 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1f16e4ac_5fb0_46bc_9d70_f148c65cb11b.slice/crio-980ff2c104d10174ae6d668e7b36e0e8afe661092899155deebf0d02ce61b4d8 WatchSource:0}: Error finding container 980ff2c104d10174ae6d668e7b36e0e8afe661092899155deebf0d02ce61b4d8: Status 404 returned error can't find the container with id 980ff2c104d10174ae6d668e7b36e0e8afe661092899155deebf0d02ce61b4d8
Nov 25 09:57:12 crc kubenswrapper[4769]: I1125 09:57:12.583708 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="minio-dev/minio" event={"ID":"1f16e4ac-5fb0-46bc-9d70-f148c65cb11b","Type":"ContainerStarted","Data":"980ff2c104d10174ae6d668e7b36e0e8afe661092899155deebf0d02ce61b4d8"}
Nov 25 09:57:15 crc kubenswrapper[4769]: I1125 09:57:15.133196 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-gdskr"]
Nov 25 09:57:15 crc kubenswrapper[4769]: I1125 09:57:15.135452 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gdskr"
Nov 25 09:57:15 crc kubenswrapper[4769]: I1125 09:57:15.154164 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-gdskr"]
Nov 25 09:57:15 crc kubenswrapper[4769]: I1125 09:57:15.225782 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00b17462-7c52-49bb-8839-99a88b04a050-catalog-content\") pod \"community-operators-gdskr\" (UID: \"00b17462-7c52-49bb-8839-99a88b04a050\") " pod="openshift-marketplace/community-operators-gdskr"
Nov 25 09:57:15 crc kubenswrapper[4769]: I1125 09:57:15.225871 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tl82t\" (UniqueName: \"kubernetes.io/projected/00b17462-7c52-49bb-8839-99a88b04a050-kube-api-access-tl82t\") pod \"community-operators-gdskr\" (UID: \"00b17462-7c52-49bb-8839-99a88b04a050\") " pod="openshift-marketplace/community-operators-gdskr"
Nov 25 09:57:15 crc kubenswrapper[4769]: I1125 09:57:15.225909 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00b17462-7c52-49bb-8839-99a88b04a050-utilities\") pod \"community-operators-gdskr\" (UID: \"00b17462-7c52-49bb-8839-99a88b04a050\") " pod="openshift-marketplace/community-operators-gdskr"
Nov 25 09:57:15 crc kubenswrapper[4769]: I1125 09:57:15.327369 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tl82t\" (UniqueName: \"kubernetes.io/projected/00b17462-7c52-49bb-8839-99a88b04a050-kube-api-access-tl82t\") pod \"community-operators-gdskr\" (UID: \"00b17462-7c52-49bb-8839-99a88b04a050\") " pod="openshift-marketplace/community-operators-gdskr"
Nov 25 09:57:15 crc kubenswrapper[4769]: I1125 09:57:15.327435 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00b17462-7c52-49bb-8839-99a88b04a050-utilities\") pod \"community-operators-gdskr\" (UID: \"00b17462-7c52-49bb-8839-99a88b04a050\") " pod="openshift-marketplace/community-operators-gdskr"
Nov 25 09:57:15 crc kubenswrapper[4769]: I1125 09:57:15.328054 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00b17462-7c52-49bb-8839-99a88b04a050-catalog-content\") pod \"community-operators-gdskr\" (UID: \"00b17462-7c52-49bb-8839-99a88b04a050\") " pod="openshift-marketplace/community-operators-gdskr"
Nov 25 09:57:15 crc kubenswrapper[4769]: I1125 09:57:15.328662 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00b17462-7c52-49bb-8839-99a88b04a050-utilities\") pod \"community-operators-gdskr\" (UID: \"00b17462-7c52-49bb-8839-99a88b04a050\") " pod="openshift-marketplace/community-operators-gdskr"
Nov 25 09:57:15 crc kubenswrapper[4769]: I1125 09:57:15.328709 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00b17462-7c52-49bb-8839-99a88b04a050-catalog-content\") pod \"community-operators-gdskr\" (UID: \"00b17462-7c52-49bb-8839-99a88b04a050\") " pod="openshift-marketplace/community-operators-gdskr"
Nov 25 09:57:15 crc kubenswrapper[4769]: I1125 09:57:15.358284 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tl82t\" (UniqueName: \"kubernetes.io/projected/00b17462-7c52-49bb-8839-99a88b04a050-kube-api-access-tl82t\") pod \"community-operators-gdskr\" (UID: \"00b17462-7c52-49bb-8839-99a88b04a050\") " pod="openshift-marketplace/community-operators-gdskr"
Nov 25 09:57:15 crc kubenswrapper[4769]: I1125 09:57:15.456476 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gdskr"
Nov 25 09:57:15 crc kubenswrapper[4769]: I1125 09:57:15.616816 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="minio-dev/minio" event={"ID":"1f16e4ac-5fb0-46bc-9d70-f148c65cb11b","Type":"ContainerStarted","Data":"913c7846c56d4cc041e730bec29b391c511e9f4a209bc1dea8465422d2b85b71"}
Nov 25 09:57:15 crc kubenswrapper[4769]: I1125 09:57:15.647677 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="minio-dev/minio" podStartSLOduration=4.531122794 podStartE2EDuration="7.647648437s" podCreationTimestamp="2025-11-25 09:57:08 +0000 UTC" firstStartedPulling="2025-11-25 09:57:11.867094046 +0000 UTC m=+780.452066359" lastFinishedPulling="2025-11-25 09:57:14.983619689 +0000 UTC m=+783.568592002" observedRunningTime="2025-11-25 09:57:15.643253201 +0000 UTC m=+784.228225514" watchObservedRunningTime="2025-11-25 09:57:15.647648437 +0000 UTC m=+784.232620750"
Nov 25 09:57:16 crc kubenswrapper[4769]: I1125 09:57:16.012498 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-gdskr"]
Nov 25 09:57:16 crc kubenswrapper[4769]: I1125 09:57:16.625916 4769 generic.go:334] "Generic (PLEG): container finished" podID="00b17462-7c52-49bb-8839-99a88b04a050" containerID="84140c6f223e9a000dcdb5a06ac02f1cc135c404bfb5c52c194631acd352addf" exitCode=0
Nov 25 09:57:16 crc kubenswrapper[4769]: I1125 09:57:16.625997 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gdskr" event={"ID":"00b17462-7c52-49bb-8839-99a88b04a050","Type":"ContainerDied","Data":"84140c6f223e9a000dcdb5a06ac02f1cc135c404bfb5c52c194631acd352addf"}
Nov 25 09:57:16 crc kubenswrapper[4769]: I1125 09:57:16.626057 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gdskr" event={"ID":"00b17462-7c52-49bb-8839-99a88b04a050","Type":"ContainerStarted","Data":"a47c566279bcd87a6b52c6ad9b4a7907e93e8e77a86e3d58a6825515a87ac6b9"}
Nov 25 09:57:17 crc kubenswrapper[4769]: I1125 09:57:17.635460 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gdskr" event={"ID":"00b17462-7c52-49bb-8839-99a88b04a050","Type":"ContainerStarted","Data":"db13d32c6a59d4766deb65b8cb1037f68eb4ed8300fcb38385ee46341609f738"}
Nov 25 09:57:18 crc kubenswrapper[4769]: I1125 09:57:18.644363 4769 generic.go:334] "Generic (PLEG): container finished" podID="00b17462-7c52-49bb-8839-99a88b04a050" containerID="db13d32c6a59d4766deb65b8cb1037f68eb4ed8300fcb38385ee46341609f738" exitCode=0
Nov 25 09:57:18 crc kubenswrapper[4769]: I1125 09:57:18.644482 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gdskr" event={"ID":"00b17462-7c52-49bb-8839-99a88b04a050","Type":"ContainerDied","Data":"db13d32c6a59d4766deb65b8cb1037f68eb4ed8300fcb38385ee46341609f738"}
Nov 25 09:57:19 crc kubenswrapper[4769]: I1125 09:57:19.670346 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gdskr" event={"ID":"00b17462-7c52-49bb-8839-99a88b04a050","Type":"ContainerStarted","Data":"8e1a0764a9e6f16a2ad2a167124bdc211e3eae11bf72d475f5dd3fd0c6c4049e"}
Nov 25 09:57:19 crc kubenswrapper[4769]: I1125 09:57:19.702239 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-gdskr" podStartSLOduration=2.141408764 podStartE2EDuration="4.702197456s" podCreationTimestamp="2025-11-25 09:57:15 +0000 UTC" firstStartedPulling="2025-11-25 09:57:16.630073207 +0000 UTC m=+785.215045520" lastFinishedPulling="2025-11-25 09:57:19.190861899 +0000 UTC m=+787.775834212" observedRunningTime="2025-11-25 09:57:19.694728995 +0000 UTC m=+788.279701298" watchObservedRunningTime="2025-11-25 09:57:19.702197456 +0000 UTC m=+788.287169799"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.086178 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-distributor-76cc67bf56-g29kd"]
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.088294 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-distributor-76cc67bf56-g29kd"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.093738 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-dockercfg-qn2th"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.094100 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-ca-bundle"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.094332 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-distributor-http"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.094685 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-distributor-grpc"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.097953 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-config"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.107949 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-distributor-76cc67bf56-g29kd"]
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.173787 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/163db859-0d28-48d8-b06a-f6a94e19479d-config\") pod \"logging-loki-distributor-76cc67bf56-g29kd\" (UID: \"163db859-0d28-48d8-b06a-f6a94e19479d\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-g29kd"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.174110 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2gz5t\" (UniqueName: \"kubernetes.io/projected/163db859-0d28-48d8-b06a-f6a94e19479d-kube-api-access-2gz5t\") pod \"logging-loki-distributor-76cc67bf56-g29kd\" (UID: \"163db859-0d28-48d8-b06a-f6a94e19479d\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-g29kd"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.174176 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/163db859-0d28-48d8-b06a-f6a94e19479d-logging-loki-ca-bundle\") pod \"logging-loki-distributor-76cc67bf56-g29kd\" (UID: \"163db859-0d28-48d8-b06a-f6a94e19479d\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-g29kd"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.174277 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-distributor-http\" (UniqueName: \"kubernetes.io/secret/163db859-0d28-48d8-b06a-f6a94e19479d-logging-loki-distributor-http\") pod \"logging-loki-distributor-76cc67bf56-g29kd\" (UID: \"163db859-0d28-48d8-b06a-f6a94e19479d\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-g29kd"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.174329 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/163db859-0d28-48d8-b06a-f6a94e19479d-logging-loki-distributor-grpc\") pod \"logging-loki-distributor-76cc67bf56-g29kd\" (UID: \"163db859-0d28-48d8-b06a-f6a94e19479d\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-g29kd"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.275687 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-distributor-http\" (UniqueName: \"kubernetes.io/secret/163db859-0d28-48d8-b06a-f6a94e19479d-logging-loki-distributor-http\") pod \"logging-loki-distributor-76cc67bf56-g29kd\" (UID: \"163db859-0d28-48d8-b06a-f6a94e19479d\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-g29kd"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.275750 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/163db859-0d28-48d8-b06a-f6a94e19479d-logging-loki-distributor-grpc\") pod \"logging-loki-distributor-76cc67bf56-g29kd\" (UID: \"163db859-0d28-48d8-b06a-f6a94e19479d\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-g29kd"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.275812 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/163db859-0d28-48d8-b06a-f6a94e19479d-config\") pod \"logging-loki-distributor-76cc67bf56-g29kd\" (UID: \"163db859-0d28-48d8-b06a-f6a94e19479d\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-g29kd"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.275894 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2gz5t\" (UniqueName: \"kubernetes.io/projected/163db859-0d28-48d8-b06a-f6a94e19479d-kube-api-access-2gz5t\") pod \"logging-loki-distributor-76cc67bf56-g29kd\" (UID: \"163db859-0d28-48d8-b06a-f6a94e19479d\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-g29kd"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.275921 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/163db859-0d28-48d8-b06a-f6a94e19479d-logging-loki-ca-bundle\") pod \"logging-loki-distributor-76cc67bf56-g29kd\" (UID: \"163db859-0d28-48d8-b06a-f6a94e19479d\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-g29kd"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.277075 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/163db859-0d28-48d8-b06a-f6a94e19479d-logging-loki-ca-bundle\") pod \"logging-loki-distributor-76cc67bf56-g29kd\" (UID: \"163db859-0d28-48d8-b06a-f6a94e19479d\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-g29kd"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.279039 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/163db859-0d28-48d8-b06a-f6a94e19479d-config\") pod \"logging-loki-distributor-76cc67bf56-g29kd\" (UID: \"163db859-0d28-48d8-b06a-f6a94e19479d\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-g29kd"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.287877 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/163db859-0d28-48d8-b06a-f6a94e19479d-logging-loki-distributor-grpc\") pod \"logging-loki-distributor-76cc67bf56-g29kd\" (UID: \"163db859-0d28-48d8-b06a-f6a94e19479d\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-g29kd"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.305289 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-querier-5895d59bb8-tbhrj"]
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.306393 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-querier-5895d59bb8-tbhrj"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.309818 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-distributor-http\" (UniqueName: \"kubernetes.io/secret/163db859-0d28-48d8-b06a-f6a94e19479d-logging-loki-distributor-http\") pod \"logging-loki-distributor-76cc67bf56-g29kd\" (UID: \"163db859-0d28-48d8-b06a-f6a94e19479d\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-g29kd"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.316391 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-s3"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.316813 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-querier-grpc"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.317757 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-querier-http"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.318744 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-querier-5895d59bb8-tbhrj"]
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.326827 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2gz5t\" (UniqueName: \"kubernetes.io/projected/163db859-0d28-48d8-b06a-f6a94e19479d-kube-api-access-2gz5t\") pod \"logging-loki-distributor-76cc67bf56-g29kd\" (UID: \"163db859-0d28-48d8-b06a-f6a94e19479d\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-g29kd"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.410330 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-query-frontend-84558f7c9f-vdrhz"]
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.411383 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-vdrhz"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.411773 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-distributor-76cc67bf56-g29kd"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.415795 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-query-frontend-grpc"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.416660 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-query-frontend-http"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.430086 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-query-frontend-84558f7c9f-vdrhz"]
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.480512 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/6091a51e-9c46-48cd-bb3a-ff1f3c9aa965-logging-loki-s3\") pod \"logging-loki-querier-5895d59bb8-tbhrj\" (UID: \"6091a51e-9c46-48cd-bb3a-ff1f3c9aa965\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-tbhrj"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.480592 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6091a51e-9c46-48cd-bb3a-ff1f3c9aa965-config\") pod \"logging-loki-querier-5895d59bb8-tbhrj\" (UID: \"6091a51e-9c46-48cd-bb3a-ff1f3c9aa965\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-tbhrj"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.480619 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-querier-grpc\" (UniqueName: \"kubernetes.io/secret/6091a51e-9c46-48cd-bb3a-ff1f3c9aa965-logging-loki-querier-grpc\") pod \"logging-loki-querier-5895d59bb8-tbhrj\" (UID: \"6091a51e-9c46-48cd-bb3a-ff1f3c9aa965\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-tbhrj"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.480642 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-querier-http\" (UniqueName: \"kubernetes.io/secret/6091a51e-9c46-48cd-bb3a-ff1f3c9aa965-logging-loki-querier-http\") pod \"logging-loki-querier-5895d59bb8-tbhrj\" (UID: \"6091a51e-9c46-48cd-bb3a-ff1f3c9aa965\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-tbhrj"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.480686 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kb9fm\" (UniqueName: \"kubernetes.io/projected/6091a51e-9c46-48cd-bb3a-ff1f3c9aa965-kube-api-access-kb9fm\") pod \"logging-loki-querier-5895d59bb8-tbhrj\" (UID: \"6091a51e-9c46-48cd-bb3a-ff1f3c9aa965\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-tbhrj"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.480731 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6091a51e-9c46-48cd-bb3a-ff1f3c9aa965-logging-loki-ca-bundle\") pod \"logging-loki-querier-5895d59bb8-tbhrj\" (UID: \"6091a51e-9c46-48cd-bb3a-ff1f3c9aa965\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-tbhrj"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.563884 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-gateway-5bdd8fd454-8hzw5"]
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.565236 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-gateway-5bdd8fd454-8hzw5"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.571444 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-gateway"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.571664 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-gateway-ca-bundle"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.571835 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-http"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.572015 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-client-http"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.572140 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-dockercfg-h8hwm"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.575994 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-gateway-5bdd8fd454-klwtc"]
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.580749 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-gateway-5bdd8fd454-klwtc"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.582427 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/8f616f7b-7627-4878-9279-6d12b8ac3bb7-logging-loki-query-frontend-http\") pod \"logging-loki-query-frontend-84558f7c9f-vdrhz\" (UID: \"8f616f7b-7627-4878-9279-6d12b8ac3bb7\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-vdrhz"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.582484 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6091a51e-9c46-48cd-bb3a-ff1f3c9aa965-logging-loki-ca-bundle\") pod \"logging-loki-querier-5895d59bb8-tbhrj\" (UID: \"6091a51e-9c46-48cd-bb3a-ff1f3c9aa965\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-tbhrj"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.582507 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f616f7b-7627-4878-9279-6d12b8ac3bb7-config\") pod \"logging-loki-query-frontend-84558f7c9f-vdrhz\" (UID: \"8f616f7b-7627-4878-9279-6d12b8ac3bb7\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-vdrhz"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.582525 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/6091a51e-9c46-48cd-bb3a-ff1f3c9aa965-logging-loki-s3\") pod \"logging-loki-querier-5895d59bb8-tbhrj\" (UID: \"6091a51e-9c46-48cd-bb3a-ff1f3c9aa965\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-tbhrj"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.582547 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gjvx2\" (UniqueName: \"kubernetes.io/projected/8f616f7b-7627-4878-9279-6d12b8ac3bb7-kube-api-access-gjvx2\") pod \"logging-loki-query-frontend-84558f7c9f-vdrhz\" (UID: \"8f616f7b-7627-4878-9279-6d12b8ac3bb7\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-vdrhz"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.582590 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6091a51e-9c46-48cd-bb3a-ff1f3c9aa965-config\") pod \"logging-loki-querier-5895d59bb8-tbhrj\" (UID: \"6091a51e-9c46-48cd-bb3a-ff1f3c9aa965\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-tbhrj"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.582615 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-querier-grpc\" (UniqueName: \"kubernetes.io/secret/6091a51e-9c46-48cd-bb3a-ff1f3c9aa965-logging-loki-querier-grpc\") pod \"logging-loki-querier-5895d59bb8-tbhrj\" (UID: \"6091a51e-9c46-48cd-bb3a-ff1f3c9aa965\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-tbhrj"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.582636 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-querier-http\" (UniqueName: \"kubernetes.io/secret/6091a51e-9c46-48cd-bb3a-ff1f3c9aa965-logging-loki-querier-http\") pod \"logging-loki-querier-5895d59bb8-tbhrj\" (UID: \"6091a51e-9c46-48cd-bb3a-ff1f3c9aa965\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-tbhrj"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.582674 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8f616f7b-7627-4878-9279-6d12b8ac3bb7-logging-loki-ca-bundle\") pod \"logging-loki-query-frontend-84558f7c9f-vdrhz\" (UID: \"8f616f7b-7627-4878-9279-6d12b8ac3bb7\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-vdrhz"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.582699 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/8f616f7b-7627-4878-9279-6d12b8ac3bb7-logging-loki-query-frontend-grpc\") pod \"logging-loki-query-frontend-84558f7c9f-vdrhz\" (UID: \"8f616f7b-7627-4878-9279-6d12b8ac3bb7\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-vdrhz"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.582716 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kb9fm\" (UniqueName: \"kubernetes.io/projected/6091a51e-9c46-48cd-bb3a-ff1f3c9aa965-kube-api-access-kb9fm\") pod \"logging-loki-querier-5895d59bb8-tbhrj\" (UID: \"6091a51e-9c46-48cd-bb3a-ff1f3c9aa965\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-tbhrj"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.583468 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6091a51e-9c46-48cd-bb3a-ff1f3c9aa965-logging-loki-ca-bundle\") pod \"logging-loki-querier-5895d59bb8-tbhrj\" (UID: \"6091a51e-9c46-48cd-bb3a-ff1f3c9aa965\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-tbhrj"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.587295 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.588037 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6091a51e-9c46-48cd-bb3a-ff1f3c9aa965-config\") pod \"logging-loki-querier-5895d59bb8-tbhrj\" (UID: \"6091a51e-9c46-48cd-bb3a-ff1f3c9aa965\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-tbhrj"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.599787 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-querier-grpc\" (UniqueName: \"kubernetes.io/secret/6091a51e-9c46-48cd-bb3a-ff1f3c9aa965-logging-loki-querier-grpc\") pod \"logging-loki-querier-5895d59bb8-tbhrj\" (UID: \"6091a51e-9c46-48cd-bb3a-ff1f3c9aa965\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-tbhrj"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.605935 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-querier-http\" (UniqueName: \"kubernetes.io/secret/6091a51e-9c46-48cd-bb3a-ff1f3c9aa965-logging-loki-querier-http\") pod \"logging-loki-querier-5895d59bb8-tbhrj\" (UID: \"6091a51e-9c46-48cd-bb3a-ff1f3c9aa965\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-tbhrj"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.621860 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kb9fm\" (UniqueName: \"kubernetes.io/projected/6091a51e-9c46-48cd-bb3a-ff1f3c9aa965-kube-api-access-kb9fm\") pod \"logging-loki-querier-5895d59bb8-tbhrj\" (UID: \"6091a51e-9c46-48cd-bb3a-ff1f3c9aa965\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-tbhrj"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.625152 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-5bdd8fd454-8hzw5"]
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.633347 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/6091a51e-9c46-48cd-bb3a-ff1f3c9aa965-logging-loki-s3\") pod \"logging-loki-querier-5895d59bb8-tbhrj\" (UID: \"6091a51e-9c46-48cd-bb3a-ff1f3c9aa965\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-tbhrj"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.634945 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-5bdd8fd454-klwtc"]
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.681845 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-querier-5895d59bb8-tbhrj"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.683794 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f535e254-5602-4794-9f47-e9bb2c1454b2-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-5bdd8fd454-klwtc\" (UID: \"f535e254-5602-4794-9f47-e9bb2c1454b2\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-klwtc"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.683833 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/f535e254-5602-4794-9f47-e9bb2c1454b2-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-5bdd8fd454-klwtc\" (UID: \"f535e254-5602-4794-9f47-e9bb2c1454b2\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-klwtc"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.683879 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4g4ww\" (UniqueName: \"kubernetes.io/projected/daa191b3-4057-42fd-8c0c-d0aa065af77b-kube-api-access-4g4ww\") pod \"logging-loki-gateway-5bdd8fd454-8hzw5\" (UID: \"daa191b3-4057-42fd-8c0c-d0aa065af77b\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-8hzw5"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.683907 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/daa191b3-4057-42fd-8c0c-d0aa065af77b-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-5bdd8fd454-8hzw5\" (UID: \"daa191b3-4057-42fd-8c0c-d0aa065af77b\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-8hzw5"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.683948 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/f535e254-5602-4794-9f47-e9bb2c1454b2-rbac\") pod \"logging-loki-gateway-5bdd8fd454-klwtc\" (UID: \"f535e254-5602-4794-9f47-e9bb2c1454b2\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-klwtc"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.684037 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f535e254-5602-4794-9f47-e9bb2c1454b2-logging-loki-ca-bundle\") pod \"logging-loki-gateway-5bdd8fd454-klwtc\" (UID: \"f535e254-5602-4794-9f47-e9bb2c1454b2\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-klwtc"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.684072 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/daa191b3-4057-42fd-8c0c-d0aa065af77b-tenants\") pod \"logging-loki-gateway-5bdd8fd454-8hzw5\" (UID: \"daa191b3-4057-42fd-8c0c-d0aa065af77b\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-8hzw5"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.684114 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hrp6d\" (UniqueName: \"kubernetes.io/projected/f535e254-5602-4794-9f47-e9bb2c1454b2-kube-api-access-hrp6d\") pod \"logging-loki-gateway-5bdd8fd454-klwtc\" (UID: \"f535e254-5602-4794-9f47-e9bb2c1454b2\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-klwtc"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.684135 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/daa191b3-4057-42fd-8c0c-d0aa065af77b-lokistack-gateway\") pod \"logging-loki-gateway-5bdd8fd454-8hzw5\" (UID: \"daa191b3-4057-42fd-8c0c-d0aa065af77b\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-8hzw5"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.684154 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/daa191b3-4057-42fd-8c0c-d0aa065af77b-rbac\") pod \"logging-loki-gateway-5bdd8fd454-8hzw5\" (UID: \"daa191b3-4057-42fd-8c0c-d0aa065af77b\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-8hzw5"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.684204 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8f616f7b-7627-4878-9279-6d12b8ac3bb7-logging-loki-ca-bundle\") pod \"logging-loki-query-frontend-84558f7c9f-vdrhz\" (UID: \"8f616f7b-7627-4878-9279-6d12b8ac3bb7\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-vdrhz"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.684234 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/8f616f7b-7627-4878-9279-6d12b8ac3bb7-logging-loki-query-frontend-grpc\") pod \"logging-loki-query-frontend-84558f7c9f-vdrhz\" (UID: \"8f616f7b-7627-4878-9279-6d12b8ac3bb7\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-vdrhz"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.684278 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/f535e254-5602-4794-9f47-e9bb2c1454b2-tenants\") pod \"logging-loki-gateway-5bdd8fd454-klwtc\" (UID: \"f535e254-5602-4794-9f47-e9bb2c1454b2\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-klwtc"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.684298 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/f535e254-5602-4794-9f47-e9bb2c1454b2-tls-secret\") pod \"logging-loki-gateway-5bdd8fd454-klwtc\" (UID: \"f535e254-5602-4794-9f47-e9bb2c1454b2\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-klwtc"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.684321 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/daa191b3-4057-42fd-8c0c-d0aa065af77b-tls-secret\") pod \"logging-loki-gateway-5bdd8fd454-8hzw5\" (UID: \"daa191b3-4057-42fd-8c0c-d0aa065af77b\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-8hzw5"
Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.684364 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/daa191b3-4057-42fd-8c0c-d0aa065af77b-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-5bdd8fd454-8hzw5\" (UID: \"daa191b3-4057-42fd-8c0c-d0aa065af77b\") "
pod="openshift-logging/logging-loki-gateway-5bdd8fd454-8hzw5" Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.684409 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/8f616f7b-7627-4878-9279-6d12b8ac3bb7-logging-loki-query-frontend-http\") pod \"logging-loki-query-frontend-84558f7c9f-vdrhz\" (UID: \"8f616f7b-7627-4878-9279-6d12b8ac3bb7\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-vdrhz" Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.684437 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/f535e254-5602-4794-9f47-e9bb2c1454b2-lokistack-gateway\") pod \"logging-loki-gateway-5bdd8fd454-klwtc\" (UID: \"f535e254-5602-4794-9f47-e9bb2c1454b2\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-klwtc" Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.684485 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f616f7b-7627-4878-9279-6d12b8ac3bb7-config\") pod \"logging-loki-query-frontend-84558f7c9f-vdrhz\" (UID: \"8f616f7b-7627-4878-9279-6d12b8ac3bb7\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-vdrhz" Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.684503 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/daa191b3-4057-42fd-8c0c-d0aa065af77b-logging-loki-ca-bundle\") pod \"logging-loki-gateway-5bdd8fd454-8hzw5\" (UID: \"daa191b3-4057-42fd-8c0c-d0aa065af77b\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-8hzw5" Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.684524 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gjvx2\" (UniqueName: \"kubernetes.io/projected/8f616f7b-7627-4878-9279-6d12b8ac3bb7-kube-api-access-gjvx2\") pod \"logging-loki-query-frontend-84558f7c9f-vdrhz\" (UID: \"8f616f7b-7627-4878-9279-6d12b8ac3bb7\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-vdrhz" Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.686215 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8f616f7b-7627-4878-9279-6d12b8ac3bb7-logging-loki-ca-bundle\") pod \"logging-loki-query-frontend-84558f7c9f-vdrhz\" (UID: \"8f616f7b-7627-4878-9279-6d12b8ac3bb7\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-vdrhz" Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.689158 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f616f7b-7627-4878-9279-6d12b8ac3bb7-config\") pod \"logging-loki-query-frontend-84558f7c9f-vdrhz\" (UID: \"8f616f7b-7627-4878-9279-6d12b8ac3bb7\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-vdrhz" Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.700491 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/8f616f7b-7627-4878-9279-6d12b8ac3bb7-logging-loki-query-frontend-grpc\") pod \"logging-loki-query-frontend-84558f7c9f-vdrhz\" (UID: \"8f616f7b-7627-4878-9279-6d12b8ac3bb7\") " 
pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-vdrhz" Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.700909 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/8f616f7b-7627-4878-9279-6d12b8ac3bb7-logging-loki-query-frontend-http\") pod \"logging-loki-query-frontend-84558f7c9f-vdrhz\" (UID: \"8f616f7b-7627-4878-9279-6d12b8ac3bb7\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-vdrhz" Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.760078 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gjvx2\" (UniqueName: \"kubernetes.io/projected/8f616f7b-7627-4878-9279-6d12b8ac3bb7-kube-api-access-gjvx2\") pod \"logging-loki-query-frontend-84558f7c9f-vdrhz\" (UID: \"8f616f7b-7627-4878-9279-6d12b8ac3bb7\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-vdrhz" Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.792201 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f535e254-5602-4794-9f47-e9bb2c1454b2-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-5bdd8fd454-klwtc\" (UID: \"f535e254-5602-4794-9f47-e9bb2c1454b2\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-klwtc" Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.792266 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/f535e254-5602-4794-9f47-e9bb2c1454b2-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-5bdd8fd454-klwtc\" (UID: \"f535e254-5602-4794-9f47-e9bb2c1454b2\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-klwtc" Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.792301 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4g4ww\" (UniqueName: \"kubernetes.io/projected/daa191b3-4057-42fd-8c0c-d0aa065af77b-kube-api-access-4g4ww\") pod \"logging-loki-gateway-5bdd8fd454-8hzw5\" (UID: \"daa191b3-4057-42fd-8c0c-d0aa065af77b\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-8hzw5" Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.792334 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/daa191b3-4057-42fd-8c0c-d0aa065af77b-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-5bdd8fd454-8hzw5\" (UID: \"daa191b3-4057-42fd-8c0c-d0aa065af77b\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-8hzw5" Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.792353 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/f535e254-5602-4794-9f47-e9bb2c1454b2-rbac\") pod \"logging-loki-gateway-5bdd8fd454-klwtc\" (UID: \"f535e254-5602-4794-9f47-e9bb2c1454b2\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-klwtc" Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.792374 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f535e254-5602-4794-9f47-e9bb2c1454b2-logging-loki-ca-bundle\") pod \"logging-loki-gateway-5bdd8fd454-klwtc\" (UID: \"f535e254-5602-4794-9f47-e9bb2c1454b2\") " 
pod="openshift-logging/logging-loki-gateway-5bdd8fd454-klwtc" Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.792415 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/daa191b3-4057-42fd-8c0c-d0aa065af77b-tenants\") pod \"logging-loki-gateway-5bdd8fd454-8hzw5\" (UID: \"daa191b3-4057-42fd-8c0c-d0aa065af77b\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-8hzw5" Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.792442 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hrp6d\" (UniqueName: \"kubernetes.io/projected/f535e254-5602-4794-9f47-e9bb2c1454b2-kube-api-access-hrp6d\") pod \"logging-loki-gateway-5bdd8fd454-klwtc\" (UID: \"f535e254-5602-4794-9f47-e9bb2c1454b2\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-klwtc" Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.792463 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/daa191b3-4057-42fd-8c0c-d0aa065af77b-rbac\") pod \"logging-loki-gateway-5bdd8fd454-8hzw5\" (UID: \"daa191b3-4057-42fd-8c0c-d0aa065af77b\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-8hzw5" Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.792481 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/daa191b3-4057-42fd-8c0c-d0aa065af77b-lokistack-gateway\") pod \"logging-loki-gateway-5bdd8fd454-8hzw5\" (UID: \"daa191b3-4057-42fd-8c0c-d0aa065af77b\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-8hzw5" Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.792525 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/f535e254-5602-4794-9f47-e9bb2c1454b2-tenants\") pod \"logging-loki-gateway-5bdd8fd454-klwtc\" (UID: \"f535e254-5602-4794-9f47-e9bb2c1454b2\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-klwtc" Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.792543 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/f535e254-5602-4794-9f47-e9bb2c1454b2-tls-secret\") pod \"logging-loki-gateway-5bdd8fd454-klwtc\" (UID: \"f535e254-5602-4794-9f47-e9bb2c1454b2\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-klwtc" Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.792565 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/daa191b3-4057-42fd-8c0c-d0aa065af77b-tls-secret\") pod \"logging-loki-gateway-5bdd8fd454-8hzw5\" (UID: \"daa191b3-4057-42fd-8c0c-d0aa065af77b\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-8hzw5" Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.792597 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/daa191b3-4057-42fd-8c0c-d0aa065af77b-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-5bdd8fd454-8hzw5\" (UID: \"daa191b3-4057-42fd-8c0c-d0aa065af77b\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-8hzw5" Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.792625 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lokistack-gateway\" (UniqueName: 
\"kubernetes.io/configmap/f535e254-5602-4794-9f47-e9bb2c1454b2-lokistack-gateway\") pod \"logging-loki-gateway-5bdd8fd454-klwtc\" (UID: \"f535e254-5602-4794-9f47-e9bb2c1454b2\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-klwtc" Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.792652 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/daa191b3-4057-42fd-8c0c-d0aa065af77b-logging-loki-ca-bundle\") pod \"logging-loki-gateway-5bdd8fd454-8hzw5\" (UID: \"daa191b3-4057-42fd-8c0c-d0aa065af77b\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-8hzw5" Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.793562 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/daa191b3-4057-42fd-8c0c-d0aa065af77b-logging-loki-ca-bundle\") pod \"logging-loki-gateway-5bdd8fd454-8hzw5\" (UID: \"daa191b3-4057-42fd-8c0c-d0aa065af77b\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-8hzw5" Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.794266 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/daa191b3-4057-42fd-8c0c-d0aa065af77b-rbac\") pod \"logging-loki-gateway-5bdd8fd454-8hzw5\" (UID: \"daa191b3-4057-42fd-8c0c-d0aa065af77b\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-8hzw5" Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.794611 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f535e254-5602-4794-9f47-e9bb2c1454b2-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-5bdd8fd454-klwtc\" (UID: \"f535e254-5602-4794-9f47-e9bb2c1454b2\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-klwtc" Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.794940 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/daa191b3-4057-42fd-8c0c-d0aa065af77b-lokistack-gateway\") pod \"logging-loki-gateway-5bdd8fd454-8hzw5\" (UID: \"daa191b3-4057-42fd-8c0c-d0aa065af77b\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-8hzw5" Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.797289 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/f535e254-5602-4794-9f47-e9bb2c1454b2-rbac\") pod \"logging-loki-gateway-5bdd8fd454-klwtc\" (UID: \"f535e254-5602-4794-9f47-e9bb2c1454b2\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-klwtc" Nov 25 09:57:20 crc kubenswrapper[4769]: E1125 09:57:20.795223 4769 secret.go:188] Couldn't get secret openshift-logging/logging-loki-gateway-http: secret "logging-loki-gateway-http" not found Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.797530 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-vdrhz" Nov 25 09:57:20 crc kubenswrapper[4769]: E1125 09:57:20.798672 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f535e254-5602-4794-9f47-e9bb2c1454b2-tls-secret podName:f535e254-5602-4794-9f47-e9bb2c1454b2 nodeName:}" failed. No retries permitted until 2025-11-25 09:57:21.298644644 +0000 UTC m=+789.883616957 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "tls-secret" (UniqueName: "kubernetes.io/secret/f535e254-5602-4794-9f47-e9bb2c1454b2-tls-secret") pod "logging-loki-gateway-5bdd8fd454-klwtc" (UID: "f535e254-5602-4794-9f47-e9bb2c1454b2") : secret "logging-loki-gateway-http" not found Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.799404 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/f535e254-5602-4794-9f47-e9bb2c1454b2-lokistack-gateway\") pod \"logging-loki-gateway-5bdd8fd454-klwtc\" (UID: \"f535e254-5602-4794-9f47-e9bb2c1454b2\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-klwtc" Nov 25 09:57:20 crc kubenswrapper[4769]: E1125 09:57:20.800405 4769 secret.go:188] Couldn't get secret openshift-logging/logging-loki-gateway-http: secret "logging-loki-gateway-http" not found Nov 25 09:57:20 crc kubenswrapper[4769]: E1125 09:57:20.800439 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/daa191b3-4057-42fd-8c0c-d0aa065af77b-tls-secret podName:daa191b3-4057-42fd-8c0c-d0aa065af77b nodeName:}" failed. No retries permitted until 2025-11-25 09:57:21.300429437 +0000 UTC m=+789.885401750 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "tls-secret" (UniqueName: "kubernetes.io/secret/daa191b3-4057-42fd-8c0c-d0aa065af77b-tls-secret") pod "logging-loki-gateway-5bdd8fd454-8hzw5" (UID: "daa191b3-4057-42fd-8c0c-d0aa065af77b") : secret "logging-loki-gateway-http" not found Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.801256 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/daa191b3-4057-42fd-8c0c-d0aa065af77b-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-5bdd8fd454-8hzw5\" (UID: \"daa191b3-4057-42fd-8c0c-d0aa065af77b\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-8hzw5" Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.808801 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/daa191b3-4057-42fd-8c0c-d0aa065af77b-tenants\") pod \"logging-loki-gateway-5bdd8fd454-8hzw5\" (UID: \"daa191b3-4057-42fd-8c0c-d0aa065af77b\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-8hzw5" Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.810614 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f535e254-5602-4794-9f47-e9bb2c1454b2-logging-loki-ca-bundle\") pod \"logging-loki-gateway-5bdd8fd454-klwtc\" (UID: \"f535e254-5602-4794-9f47-e9bb2c1454b2\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-klwtc" Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.813911 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/f535e254-5602-4794-9f47-e9bb2c1454b2-tenants\") pod \"logging-loki-gateway-5bdd8fd454-klwtc\" (UID: \"f535e254-5602-4794-9f47-e9bb2c1454b2\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-klwtc" Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.816006 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/f535e254-5602-4794-9f47-e9bb2c1454b2-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-5bdd8fd454-klwtc\" (UID: \"f535e254-5602-4794-9f47-e9bb2c1454b2\") 
" pod="openshift-logging/logging-loki-gateway-5bdd8fd454-klwtc" Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.824799 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/daa191b3-4057-42fd-8c0c-d0aa065af77b-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-5bdd8fd454-8hzw5\" (UID: \"daa191b3-4057-42fd-8c0c-d0aa065af77b\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-8hzw5" Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.829940 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4g4ww\" (UniqueName: \"kubernetes.io/projected/daa191b3-4057-42fd-8c0c-d0aa065af77b-kube-api-access-4g4ww\") pod \"logging-loki-gateway-5bdd8fd454-8hzw5\" (UID: \"daa191b3-4057-42fd-8c0c-d0aa065af77b\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-8hzw5" Nov 25 09:57:20 crc kubenswrapper[4769]: I1125 09:57:20.837951 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hrp6d\" (UniqueName: \"kubernetes.io/projected/f535e254-5602-4794-9f47-e9bb2c1454b2-kube-api-access-hrp6d\") pod \"logging-loki-gateway-5bdd8fd454-klwtc\" (UID: \"f535e254-5602-4794-9f47-e9bb2c1454b2\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-klwtc" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.244503 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-ingester-0"] Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.247436 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.250325 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-ingester-grpc" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.250355 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-ingester-http" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.281207 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-distributor-76cc67bf56-g29kd"] Nov 25 09:57:21 crc kubenswrapper[4769]: W1125 09:57:21.281261 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod163db859_0d28_48d8_b06a_f6a94e19479d.slice/crio-d863dca0e5e93d9c2444e486fcbd6876e0d37ee12ae4322a0896118445a90dca WatchSource:0}: Error finding container d863dca0e5e93d9c2444e486fcbd6876e0d37ee12ae4322a0896118445a90dca: Status 404 returned error can't find the container with id d863dca0e5e93d9c2444e486fcbd6876e0d37ee12ae4322a0896118445a90dca Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.306146 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/f535e254-5602-4794-9f47-e9bb2c1454b2-tls-secret\") pod \"logging-loki-gateway-5bdd8fd454-klwtc\" (UID: \"f535e254-5602-4794-9f47-e9bb2c1454b2\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-klwtc" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.306204 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/daa191b3-4057-42fd-8c0c-d0aa065af77b-tls-secret\") pod \"logging-loki-gateway-5bdd8fd454-8hzw5\" (UID: \"daa191b3-4057-42fd-8c0c-d0aa065af77b\") " 
pod="openshift-logging/logging-loki-gateway-5bdd8fd454-8hzw5" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.311255 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/daa191b3-4057-42fd-8c0c-d0aa065af77b-tls-secret\") pod \"logging-loki-gateway-5bdd8fd454-8hzw5\" (UID: \"daa191b3-4057-42fd-8c0c-d0aa065af77b\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-8hzw5" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.311424 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/f535e254-5602-4794-9f47-e9bb2c1454b2-tls-secret\") pod \"logging-loki-gateway-5bdd8fd454-klwtc\" (UID: \"f535e254-5602-4794-9f47-e9bb2c1454b2\") " pod="openshift-logging/logging-loki-gateway-5bdd8fd454-klwtc" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.320181 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-ingester-0"] Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.399155 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-compactor-0"] Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.401570 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-compactor-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.406456 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-compactor-0"] Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.407318 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-compactor-http" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.407503 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/853f39bc-058d-47a2-82f1-827f485f11a5-config\") pod \"logging-loki-ingester-0\" (UID: \"853f39bc-058d-47a2-82f1-827f485f11a5\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.407564 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-dbe7a626-f2b4-4c47-9812-83367f17cb47\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-dbe7a626-f2b4-4c47-9812-83367f17cb47\") pod \"logging-loki-ingester-0\" (UID: \"853f39bc-058d-47a2-82f1-827f485f11a5\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.407619 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-517b1b83-09ac-489b-ac1d-4c113950bb57\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-517b1b83-09ac-489b-ac1d-4c113950bb57\") pod \"logging-loki-ingester-0\" (UID: \"853f39bc-058d-47a2-82f1-827f485f11a5\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.407681 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/853f39bc-058d-47a2-82f1-827f485f11a5-logging-loki-ingester-grpc\") pod \"logging-loki-ingester-0\" (UID: \"853f39bc-058d-47a2-82f1-827f485f11a5\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.407711 4769 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zldfh\" (UniqueName: \"kubernetes.io/projected/853f39bc-058d-47a2-82f1-827f485f11a5-kube-api-access-zldfh\") pod \"logging-loki-ingester-0\" (UID: \"853f39bc-058d-47a2-82f1-827f485f11a5\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.407731 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/853f39bc-058d-47a2-82f1-827f485f11a5-logging-loki-ca-bundle\") pod \"logging-loki-ingester-0\" (UID: \"853f39bc-058d-47a2-82f1-827f485f11a5\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.407751 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ingester-http\" (UniqueName: \"kubernetes.io/secret/853f39bc-058d-47a2-82f1-827f485f11a5-logging-loki-ingester-http\") pod \"logging-loki-ingester-0\" (UID: \"853f39bc-058d-47a2-82f1-827f485f11a5\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.407787 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/853f39bc-058d-47a2-82f1-827f485f11a5-logging-loki-s3\") pod \"logging-loki-ingester-0\" (UID: \"853f39bc-058d-47a2-82f1-827f485f11a5\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.411376 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-compactor-grpc" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.449075 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-querier-5895d59bb8-tbhrj"] Nov 25 09:57:21 crc kubenswrapper[4769]: W1125 09:57:21.460592 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6091a51e_9c46_48cd_bb3a_ff1f3c9aa965.slice/crio-6919b9e4375ae7cb5b08f79cf9886aead09d728609bad17ad224ab73c331d308 WatchSource:0}: Error finding container 6919b9e4375ae7cb5b08f79cf9886aead09d728609bad17ad224ab73c331d308: Status 404 returned error can't find the container with id 6919b9e4375ae7cb5b08f79cf9886aead09d728609bad17ad224ab73c331d308 Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.484875 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-index-gateway-0"] Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.486196 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-index-gateway-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.495172 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-index-gateway-http" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.495408 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-index-gateway-grpc" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.501113 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-index-gateway-0"] Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.510746 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a4405372-5512-4a21-9e58-569bdcd4389c-logging-loki-ca-bundle\") pod \"logging-loki-compactor-0\" (UID: \"a4405372-5512-4a21-9e58-569bdcd4389c\") " pod="openshift-logging/logging-loki-compactor-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.510812 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4405372-5512-4a21-9e58-569bdcd4389c-config\") pod \"logging-loki-compactor-0\" (UID: \"a4405372-5512-4a21-9e58-569bdcd4389c\") " pod="openshift-logging/logging-loki-compactor-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.510859 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-517b1b83-09ac-489b-ac1d-4c113950bb57\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-517b1b83-09ac-489b-ac1d-4c113950bb57\") pod \"logging-loki-ingester-0\" (UID: \"853f39bc-058d-47a2-82f1-827f485f11a5\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.510928 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/853f39bc-058d-47a2-82f1-827f485f11a5-logging-loki-ingester-grpc\") pod \"logging-loki-ingester-0\" (UID: \"853f39bc-058d-47a2-82f1-827f485f11a5\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.510979 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zldfh\" (UniqueName: \"kubernetes.io/projected/853f39bc-058d-47a2-82f1-827f485f11a5-kube-api-access-zldfh\") pod \"logging-loki-ingester-0\" (UID: \"853f39bc-058d-47a2-82f1-827f485f11a5\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.511008 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-compactor-http\" (UniqueName: \"kubernetes.io/secret/a4405372-5512-4a21-9e58-569bdcd4389c-logging-loki-compactor-http\") pod \"logging-loki-compactor-0\" (UID: \"a4405372-5512-4a21-9e58-569bdcd4389c\") " pod="openshift-logging/logging-loki-compactor-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.511037 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/853f39bc-058d-47a2-82f1-827f485f11a5-logging-loki-ca-bundle\") pod \"logging-loki-ingester-0\" (UID: \"853f39bc-058d-47a2-82f1-827f485f11a5\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 
09:57:21.511062 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ingester-http\" (UniqueName: \"kubernetes.io/secret/853f39bc-058d-47a2-82f1-827f485f11a5-logging-loki-ingester-http\") pod \"logging-loki-ingester-0\" (UID: \"853f39bc-058d-47a2-82f1-827f485f11a5\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.511091 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-64fb344e-a7ad-481b-833b-ff476e7f0e3b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-64fb344e-a7ad-481b-833b-ff476e7f0e3b\") pod \"logging-loki-compactor-0\" (UID: \"a4405372-5512-4a21-9e58-569bdcd4389c\") " pod="openshift-logging/logging-loki-compactor-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.511130 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/853f39bc-058d-47a2-82f1-827f485f11a5-logging-loki-s3\") pod \"logging-loki-ingester-0\" (UID: \"853f39bc-058d-47a2-82f1-827f485f11a5\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.511155 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/a4405372-5512-4a21-9e58-569bdcd4389c-logging-loki-s3\") pod \"logging-loki-compactor-0\" (UID: \"a4405372-5512-4a21-9e58-569bdcd4389c\") " pod="openshift-logging/logging-loki-compactor-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.511193 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zt8tg\" (UniqueName: \"kubernetes.io/projected/a4405372-5512-4a21-9e58-569bdcd4389c-kube-api-access-zt8tg\") pod \"logging-loki-compactor-0\" (UID: \"a4405372-5512-4a21-9e58-569bdcd4389c\") " pod="openshift-logging/logging-loki-compactor-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.511231 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/853f39bc-058d-47a2-82f1-827f485f11a5-config\") pod \"logging-loki-ingester-0\" (UID: \"853f39bc-058d-47a2-82f1-827f485f11a5\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.511269 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/a4405372-5512-4a21-9e58-569bdcd4389c-logging-loki-compactor-grpc\") pod \"logging-loki-compactor-0\" (UID: \"a4405372-5512-4a21-9e58-569bdcd4389c\") " pod="openshift-logging/logging-loki-compactor-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.511306 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-dbe7a626-f2b4-4c47-9812-83367f17cb47\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-dbe7a626-f2b4-4c47-9812-83367f17cb47\") pod \"logging-loki-ingester-0\" (UID: \"853f39bc-058d-47a2-82f1-827f485f11a5\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.521074 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/853f39bc-058d-47a2-82f1-827f485f11a5-logging-loki-ca-bundle\") pod \"logging-loki-ingester-0\" 
(UID: \"853f39bc-058d-47a2-82f1-827f485f11a5\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.522278 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/853f39bc-058d-47a2-82f1-827f485f11a5-config\") pod \"logging-loki-ingester-0\" (UID: \"853f39bc-058d-47a2-82f1-827f485f11a5\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.524871 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/853f39bc-058d-47a2-82f1-827f485f11a5-logging-loki-ingester-grpc\") pod \"logging-loki-ingester-0\" (UID: \"853f39bc-058d-47a2-82f1-827f485f11a5\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.525622 4769 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.525683 4769 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-dbe7a626-f2b4-4c47-9812-83367f17cb47\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-dbe7a626-f2b4-4c47-9812-83367f17cb47\") pod \"logging-loki-ingester-0\" (UID: \"853f39bc-058d-47a2-82f1-827f485f11a5\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/62588f07710ad5476c2b7acd4f86631360a4fdf61fafde8cc252b54e798804c2/globalmount\"" pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.526199 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/853f39bc-058d-47a2-82f1-827f485f11a5-logging-loki-s3\") pod \"logging-loki-ingester-0\" (UID: \"853f39bc-058d-47a2-82f1-827f485f11a5\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.527172 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-gateway-5bdd8fd454-8hzw5" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.529747 4769 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.529793 4769 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-517b1b83-09ac-489b-ac1d-4c113950bb57\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-517b1b83-09ac-489b-ac1d-4c113950bb57\") pod \"logging-loki-ingester-0\" (UID: \"853f39bc-058d-47a2-82f1-827f485f11a5\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/77936e00ef635043059335f5170c774cb97bfa90750cfc27567f07e335ee846b/globalmount\"" pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.537794 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ingester-http\" (UniqueName: \"kubernetes.io/secret/853f39bc-058d-47a2-82f1-827f485f11a5-logging-loki-ingester-http\") pod \"logging-loki-ingester-0\" (UID: \"853f39bc-058d-47a2-82f1-827f485f11a5\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.553857 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zldfh\" (UniqueName: \"kubernetes.io/projected/853f39bc-058d-47a2-82f1-827f485f11a5-kube-api-access-zldfh\") pod \"logging-loki-ingester-0\" (UID: \"853f39bc-058d-47a2-82f1-827f485f11a5\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.564504 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-gateway-5bdd8fd454-klwtc" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.592491 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-dbe7a626-f2b4-4c47-9812-83367f17cb47\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-dbe7a626-f2b4-4c47-9812-83367f17cb47\") pod \"logging-loki-ingester-0\" (UID: \"853f39bc-058d-47a2-82f1-827f485f11a5\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.593936 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-517b1b83-09ac-489b-ac1d-4c113950bb57\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-517b1b83-09ac-489b-ac1d-4c113950bb57\") pod \"logging-loki-ingester-0\" (UID: \"853f39bc-058d-47a2-82f1-827f485f11a5\") " pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.616287 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-64fb344e-a7ad-481b-833b-ff476e7f0e3b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-64fb344e-a7ad-481b-833b-ff476e7f0e3b\") pod \"logging-loki-compactor-0\" (UID: \"a4405372-5512-4a21-9e58-569bdcd4389c\") " pod="openshift-logging/logging-loki-compactor-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.616381 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zt8tg\" (UniqueName: \"kubernetes.io/projected/a4405372-5512-4a21-9e58-569bdcd4389c-kube-api-access-zt8tg\") pod \"logging-loki-compactor-0\" (UID: \"a4405372-5512-4a21-9e58-569bdcd4389c\") " pod="openshift-logging/logging-loki-compactor-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.616420 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-index-gateway-grpc\" (UniqueName: 
\"kubernetes.io/secret/ba6e5673-c7e8-4242-b24e-85603e10e8ac-logging-loki-index-gateway-grpc\") pod \"logging-loki-index-gateway-0\" (UID: \"ba6e5673-c7e8-4242-b24e-85603e10e8ac\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.616454 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ba6e5673-c7e8-4242-b24e-85603e10e8ac-config\") pod \"logging-loki-index-gateway-0\" (UID: \"ba6e5673-c7e8-4242-b24e-85603e10e8ac\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.616485 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/a4405372-5512-4a21-9e58-569bdcd4389c-logging-loki-compactor-grpc\") pod \"logging-loki-compactor-0\" (UID: \"a4405372-5512-4a21-9e58-569bdcd4389c\") " pod="openshift-logging/logging-loki-compactor-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.616538 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4405372-5512-4a21-9e58-569bdcd4389c-config\") pod \"logging-loki-compactor-0\" (UID: \"a4405372-5512-4a21-9e58-569bdcd4389c\") " pod="openshift-logging/logging-loki-compactor-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.616564 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/ba6e5673-c7e8-4242-b24e-85603e10e8ac-logging-loki-s3\") pod \"logging-loki-index-gateway-0\" (UID: \"ba6e5673-c7e8-4242-b24e-85603e10e8ac\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.616611 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-compactor-http\" (UniqueName: \"kubernetes.io/secret/a4405372-5512-4a21-9e58-569bdcd4389c-logging-loki-compactor-http\") pod \"logging-loki-compactor-0\" (UID: \"a4405372-5512-4a21-9e58-569bdcd4389c\") " pod="openshift-logging/logging-loki-compactor-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.616638 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/ba6e5673-c7e8-4242-b24e-85603e10e8ac-logging-loki-index-gateway-http\") pod \"logging-loki-index-gateway-0\" (UID: \"ba6e5673-c7e8-4242-b24e-85603e10e8ac\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.616672 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/a4405372-5512-4a21-9e58-569bdcd4389c-logging-loki-s3\") pod \"logging-loki-compactor-0\" (UID: \"a4405372-5512-4a21-9e58-569bdcd4389c\") " pod="openshift-logging/logging-loki-compactor-0" Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.616735 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-4d4d5489-738c-4626-997a-5ded6d6afcf0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4d4d5489-738c-4626-997a-5ded6d6afcf0\") pod \"logging-loki-index-gateway-0\" (UID: \"ba6e5673-c7e8-4242-b24e-85603e10e8ac\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 25 
09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.616772 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ba6e5673-c7e8-4242-b24e-85603e10e8ac-logging-loki-ca-bundle\") pod \"logging-loki-index-gateway-0\" (UID: \"ba6e5673-c7e8-4242-b24e-85603e10e8ac\") " pod="openshift-logging/logging-loki-index-gateway-0"
Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.616826 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a4405372-5512-4a21-9e58-569bdcd4389c-logging-loki-ca-bundle\") pod \"logging-loki-compactor-0\" (UID: \"a4405372-5512-4a21-9e58-569bdcd4389c\") " pod="openshift-logging/logging-loki-compactor-0"
Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.616883 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fgz5s\" (UniqueName: \"kubernetes.io/projected/ba6e5673-c7e8-4242-b24e-85603e10e8ac-kube-api-access-fgz5s\") pod \"logging-loki-index-gateway-0\" (UID: \"ba6e5673-c7e8-4242-b24e-85603e10e8ac\") " pod="openshift-logging/logging-loki-index-gateway-0"
Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.620880 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a4405372-5512-4a21-9e58-569bdcd4389c-logging-loki-ca-bundle\") pod \"logging-loki-compactor-0\" (UID: \"a4405372-5512-4a21-9e58-569bdcd4389c\") " pod="openshift-logging/logging-loki-compactor-0"
Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.621106 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4405372-5512-4a21-9e58-569bdcd4389c-config\") pod \"logging-loki-compactor-0\" (UID: \"a4405372-5512-4a21-9e58-569bdcd4389c\") " pod="openshift-logging/logging-loki-compactor-0"
Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.622619 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/a4405372-5512-4a21-9e58-569bdcd4389c-logging-loki-compactor-grpc\") pod \"logging-loki-compactor-0\" (UID: \"a4405372-5512-4a21-9e58-569bdcd4389c\") " pod="openshift-logging/logging-loki-compactor-0"
Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.623567 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-compactor-http\" (UniqueName: \"kubernetes.io/secret/a4405372-5512-4a21-9e58-569bdcd4389c-logging-loki-compactor-http\") pod \"logging-loki-compactor-0\" (UID: \"a4405372-5512-4a21-9e58-569bdcd4389c\") " pod="openshift-logging/logging-loki-compactor-0"
Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.623613 4769 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.623667 4769 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-64fb344e-a7ad-481b-833b-ff476e7f0e3b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-64fb344e-a7ad-481b-833b-ff476e7f0e3b\") pod \"logging-loki-compactor-0\" (UID: \"a4405372-5512-4a21-9e58-569bdcd4389c\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1deaa8650d20e8d1081857fb2bc22622eae5d73721ef7c4964b6b1a9d79bf79d/globalmount\"" pod="openshift-logging/logging-loki-compactor-0"
Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.628614 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/a4405372-5512-4a21-9e58-569bdcd4389c-logging-loki-s3\") pod \"logging-loki-compactor-0\" (UID: \"a4405372-5512-4a21-9e58-569bdcd4389c\") " pod="openshift-logging/logging-loki-compactor-0"
Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.631216 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-query-frontend-84558f7c9f-vdrhz"]
Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.640392 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zt8tg\" (UniqueName: \"kubernetes.io/projected/a4405372-5512-4a21-9e58-569bdcd4389c-kube-api-access-zt8tg\") pod \"logging-loki-compactor-0\" (UID: \"a4405372-5512-4a21-9e58-569bdcd4389c\") " pod="openshift-logging/logging-loki-compactor-0"
Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.666531 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-64fb344e-a7ad-481b-833b-ff476e7f0e3b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-64fb344e-a7ad-481b-833b-ff476e7f0e3b\") pod \"logging-loki-compactor-0\" (UID: \"a4405372-5512-4a21-9e58-569bdcd4389c\") " pod="openshift-logging/logging-loki-compactor-0"
Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.717236 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-querier-5895d59bb8-tbhrj" event={"ID":"6091a51e-9c46-48cd-bb3a-ff1f3c9aa965","Type":"ContainerStarted","Data":"6919b9e4375ae7cb5b08f79cf9886aead09d728609bad17ad224ab73c331d308"}
Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.718599 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/ba6e5673-c7e8-4242-b24e-85603e10e8ac-logging-loki-index-gateway-grpc\") pod \"logging-loki-index-gateway-0\" (UID: \"ba6e5673-c7e8-4242-b24e-85603e10e8ac\") " pod="openshift-logging/logging-loki-index-gateway-0"
Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.718665 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ba6e5673-c7e8-4242-b24e-85603e10e8ac-config\") pod \"logging-loki-index-gateway-0\" (UID: \"ba6e5673-c7e8-4242-b24e-85603e10e8ac\") " pod="openshift-logging/logging-loki-index-gateway-0"
Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.718710 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/ba6e5673-c7e8-4242-b24e-85603e10e8ac-logging-loki-s3\") pod \"logging-loki-index-gateway-0\" (UID: \"ba6e5673-c7e8-4242-b24e-85603e10e8ac\") " pod="openshift-logging/logging-loki-index-gateway-0"
Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.718745 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/ba6e5673-c7e8-4242-b24e-85603e10e8ac-logging-loki-index-gateway-http\") pod \"logging-loki-index-gateway-0\" (UID: \"ba6e5673-c7e8-4242-b24e-85603e10e8ac\") " pod="openshift-logging/logging-loki-index-gateway-0"
Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.718781 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-4d4d5489-738c-4626-997a-5ded6d6afcf0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4d4d5489-738c-4626-997a-5ded6d6afcf0\") pod \"logging-loki-index-gateway-0\" (UID: \"ba6e5673-c7e8-4242-b24e-85603e10e8ac\") " pod="openshift-logging/logging-loki-index-gateway-0"
Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.718806 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ba6e5673-c7e8-4242-b24e-85603e10e8ac-logging-loki-ca-bundle\") pod \"logging-loki-index-gateway-0\" (UID: \"ba6e5673-c7e8-4242-b24e-85603e10e8ac\") " pod="openshift-logging/logging-loki-index-gateway-0"
Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.718849 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fgz5s\" (UniqueName: \"kubernetes.io/projected/ba6e5673-c7e8-4242-b24e-85603e10e8ac-kube-api-access-fgz5s\") pod \"logging-loki-index-gateway-0\" (UID: \"ba6e5673-c7e8-4242-b24e-85603e10e8ac\") " pod="openshift-logging/logging-loki-index-gateway-0"
Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.721521 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ba6e5673-c7e8-4242-b24e-85603e10e8ac-config\") pod \"logging-loki-index-gateway-0\" (UID: \"ba6e5673-c7e8-4242-b24e-85603e10e8ac\") " pod="openshift-logging/logging-loki-index-gateway-0"
Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.722534 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ba6e5673-c7e8-4242-b24e-85603e10e8ac-logging-loki-ca-bundle\") pod \"logging-loki-index-gateway-0\" (UID: \"ba6e5673-c7e8-4242-b24e-85603e10e8ac\") " pod="openshift-logging/logging-loki-index-gateway-0"
Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.723320 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/ba6e5673-c7e8-4242-b24e-85603e10e8ac-logging-loki-index-gateway-grpc\") pod \"logging-loki-index-gateway-0\" (UID: \"ba6e5673-c7e8-4242-b24e-85603e10e8ac\") " pod="openshift-logging/logging-loki-index-gateway-0"
Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.724810 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/ba6e5673-c7e8-4242-b24e-85603e10e8ac-logging-loki-index-gateway-http\") pod \"logging-loki-index-gateway-0\" (UID: \"ba6e5673-c7e8-4242-b24e-85603e10e8ac\") " pod="openshift-logging/logging-loki-index-gateway-0"
Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.727159 4769 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.727198 4769 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-4d4d5489-738c-4626-997a-5ded6d6afcf0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4d4d5489-738c-4626-997a-5ded6d6afcf0\") pod \"logging-loki-index-gateway-0\" (UID: \"ba6e5673-c7e8-4242-b24e-85603e10e8ac\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/0f43d209c1b42fce5cd4df3790c29f97128a2c08283199a5a2633eaaf3b090c9/globalmount\"" pod="openshift-logging/logging-loki-index-gateway-0"
Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.728340 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-compactor-0"
Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.729885 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-distributor-76cc67bf56-g29kd" event={"ID":"163db859-0d28-48d8-b06a-f6a94e19479d","Type":"ContainerStarted","Data":"d863dca0e5e93d9c2444e486fcbd6876e0d37ee12ae4322a0896118445a90dca"}
Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.732880 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/ba6e5673-c7e8-4242-b24e-85603e10e8ac-logging-loki-s3\") pod \"logging-loki-index-gateway-0\" (UID: \"ba6e5673-c7e8-4242-b24e-85603e10e8ac\") " pod="openshift-logging/logging-loki-index-gateway-0"
Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.735145 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-vdrhz" event={"ID":"8f616f7b-7627-4878-9279-6d12b8ac3bb7","Type":"ContainerStarted","Data":"efdc3725499a1d9755c39708d4a8c05d78e345ebb64545f0c55f7f3042e05c6b"}
Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.742084 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fgz5s\" (UniqueName: \"kubernetes.io/projected/ba6e5673-c7e8-4242-b24e-85603e10e8ac-kube-api-access-fgz5s\") pod \"logging-loki-index-gateway-0\" (UID: \"ba6e5673-c7e8-4242-b24e-85603e10e8ac\") " pod="openshift-logging/logging-loki-index-gateway-0"
Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.761956 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-4d4d5489-738c-4626-997a-5ded6d6afcf0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4d4d5489-738c-4626-997a-5ded6d6afcf0\") pod \"logging-loki-index-gateway-0\" (UID: \"ba6e5673-c7e8-4242-b24e-85603e10e8ac\") " pod="openshift-logging/logging-loki-index-gateway-0"
Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.816095 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-index-gateway-0"
Nov 25 09:57:21 crc kubenswrapper[4769]: I1125 09:57:21.882032 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-ingester-0"
Nov 25 09:57:22 crc kubenswrapper[4769]: I1125 09:57:22.067046 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-5bdd8fd454-8hzw5"]
Nov 25 09:57:22 crc kubenswrapper[4769]: I1125 09:57:22.205350 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-5bdd8fd454-klwtc"]
Nov 25 09:57:22 crc kubenswrapper[4769]: I1125 09:57:22.277348 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-compactor-0"]
Nov 25 09:57:22 crc kubenswrapper[4769]: W1125 09:57:22.281692 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda4405372_5512_4a21_9e58_569bdcd4389c.slice/crio-eea5aabba308bececd8f3a8d1379b93d2cf0261379cca6830b14f26474a44616 WatchSource:0}: Error finding container eea5aabba308bececd8f3a8d1379b93d2cf0261379cca6830b14f26474a44616: Status 404 returned error can't find the container with id eea5aabba308bececd8f3a8d1379b93d2cf0261379cca6830b14f26474a44616
Nov 25 09:57:22 crc kubenswrapper[4769]: I1125 09:57:22.290180 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 09:57:22 crc kubenswrapper[4769]: I1125 09:57:22.290379 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 09:57:22 crc kubenswrapper[4769]: I1125 09:57:22.290442 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-98mzt"
Nov 25 09:57:22 crc kubenswrapper[4769]: I1125 09:57:22.291842 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2b789aba899603f1a14199b861fe977351ed8d7e8ef00a45b850160445e4d04d"} pod="openshift-machine-config-operator/machine-config-daemon-98mzt" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 25 09:57:22 crc kubenswrapper[4769]: I1125 09:57:22.291914 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" containerID="cri-o://2b789aba899603f1a14199b861fe977351ed8d7e8ef00a45b850160445e4d04d" gracePeriod=600
Nov 25 09:57:22 crc kubenswrapper[4769]: I1125 09:57:22.422911 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-index-gateway-0"]
Nov 25 09:57:22 crc kubenswrapper[4769]: W1125 09:57:22.426772 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podba6e5673_c7e8_4242_b24e_85603e10e8ac.slice/crio-cb6326793e5034317c9bc585b792da7c78f86a1e17da7fc7cec93668d12df995 WatchSource:0}: Error finding container cb6326793e5034317c9bc585b792da7c78f86a1e17da7fc7cec93668d12df995: Status 404 returned error can't find the container with id cb6326793e5034317c9bc585b792da7c78f86a1e17da7fc7cec93668d12df995
Nov 25 09:57:22 crc kubenswrapper[4769]: I1125 09:57:22.486385 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-ingester-0"]
Nov 25 09:57:22 crc kubenswrapper[4769]: I1125 09:57:22.766393 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-ingester-0" event={"ID":"853f39bc-058d-47a2-82f1-827f485f11a5","Type":"ContainerStarted","Data":"b5cd0bad27fc9f74c581fe7853a5b2597050c06e3631e093eef858be176f5499"}
Nov 25 09:57:22 crc kubenswrapper[4769]: I1125 09:57:22.768421 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-5bdd8fd454-8hzw5" event={"ID":"daa191b3-4057-42fd-8c0c-d0aa065af77b","Type":"ContainerStarted","Data":"7afbcf5d5423c0a19fcf5177fa2bf67ae51051bdc4f32021612866dfe79aaf19"}
Nov 25 09:57:22 crc kubenswrapper[4769]: I1125 09:57:22.769816 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-compactor-0" event={"ID":"a4405372-5512-4a21-9e58-569bdcd4389c","Type":"ContainerStarted","Data":"eea5aabba308bececd8f3a8d1379b93d2cf0261379cca6830b14f26474a44616"}
Nov 25 09:57:22 crc kubenswrapper[4769]: I1125 09:57:22.774262 4769 generic.go:334] "Generic (PLEG): container finished" podID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerID="2b789aba899603f1a14199b861fe977351ed8d7e8ef00a45b850160445e4d04d" exitCode=0
Nov 25 09:57:22 crc kubenswrapper[4769]: I1125 09:57:22.774322 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" event={"ID":"d58c71b5-5dc4-45c1-9b58-9740a35d2256","Type":"ContainerDied","Data":"2b789aba899603f1a14199b861fe977351ed8d7e8ef00a45b850160445e4d04d"}
Nov 25 09:57:22 crc kubenswrapper[4769]: I1125 09:57:22.774397 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" event={"ID":"d58c71b5-5dc4-45c1-9b58-9740a35d2256","Type":"ContainerStarted","Data":"890cbbbb644d2d20db67bdcbf036dd9908902406f9cea9d984225deeb8a33fbe"}
Nov 25 09:57:22 crc kubenswrapper[4769]: I1125 09:57:22.774419 4769 scope.go:117] "RemoveContainer" containerID="7240a68bdc93001883e181fdd6d0a4be2f87fda7024907954cc0af44e59b1c48"
Nov 25 09:57:22 crc kubenswrapper[4769]: I1125 09:57:22.776523 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-5bdd8fd454-klwtc" event={"ID":"f535e254-5602-4794-9f47-e9bb2c1454b2","Type":"ContainerStarted","Data":"f99924f51bd7c6245050d61c92de616ffa4479063e23b148250433384ac7b87c"}
Nov 25 09:57:22 crc kubenswrapper[4769]: I1125 09:57:22.779158 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-index-gateway-0" event={"ID":"ba6e5673-c7e8-4242-b24e-85603e10e8ac","Type":"ContainerStarted","Data":"cb6326793e5034317c9bc585b792da7c78f86a1e17da7fc7cec93668d12df995"}
Nov 25 09:57:25 crc kubenswrapper[4769]: I1125 09:57:25.456808 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-gdskr"
Nov 25 09:57:25 crc kubenswrapper[4769]: I1125 09:57:25.457709 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-gdskr"
Nov 25 09:57:25 crc kubenswrapper[4769]: I1125 09:57:25.518198 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-gdskr"
Nov 25 09:57:25 crc kubenswrapper[4769]: I1125 09:57:25.767577 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-s9x5s"]
Nov 25 09:57:25 crc kubenswrapper[4769]: I1125 09:57:25.769247 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-s9x5s"
Nov 25 09:57:25 crc kubenswrapper[4769]: I1125 09:57:25.788869 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-s9x5s"]
Nov 25 09:57:25 crc kubenswrapper[4769]: I1125 09:57:25.822034 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c4wlt\" (UniqueName: \"kubernetes.io/projected/280c1227-51da-4ce3-a43b-08a0cbb9dedb-kube-api-access-c4wlt\") pod \"certified-operators-s9x5s\" (UID: \"280c1227-51da-4ce3-a43b-08a0cbb9dedb\") " pod="openshift-marketplace/certified-operators-s9x5s"
Nov 25 09:57:25 crc kubenswrapper[4769]: I1125 09:57:25.822615 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/280c1227-51da-4ce3-a43b-08a0cbb9dedb-utilities\") pod \"certified-operators-s9x5s\" (UID: \"280c1227-51da-4ce3-a43b-08a0cbb9dedb\") " pod="openshift-marketplace/certified-operators-s9x5s"
Nov 25 09:57:25 crc kubenswrapper[4769]: I1125 09:57:25.822685 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/280c1227-51da-4ce3-a43b-08a0cbb9dedb-catalog-content\") pod \"certified-operators-s9x5s\" (UID: \"280c1227-51da-4ce3-a43b-08a0cbb9dedb\") " pod="openshift-marketplace/certified-operators-s9x5s"
Nov 25 09:57:25 crc kubenswrapper[4769]: I1125 09:57:25.886668 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-gdskr"
Nov 25 09:57:25 crc kubenswrapper[4769]: I1125 09:57:25.924249 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/280c1227-51da-4ce3-a43b-08a0cbb9dedb-utilities\") pod \"certified-operators-s9x5s\" (UID: \"280c1227-51da-4ce3-a43b-08a0cbb9dedb\") " pod="openshift-marketplace/certified-operators-s9x5s"
Nov 25 09:57:25 crc kubenswrapper[4769]: I1125 09:57:25.924393 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/280c1227-51da-4ce3-a43b-08a0cbb9dedb-catalog-content\") pod \"certified-operators-s9x5s\" (UID: \"280c1227-51da-4ce3-a43b-08a0cbb9dedb\") " pod="openshift-marketplace/certified-operators-s9x5s"
Nov 25 09:57:25 crc kubenswrapper[4769]: I1125 09:57:25.924452 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c4wlt\" (UniqueName: \"kubernetes.io/projected/280c1227-51da-4ce3-a43b-08a0cbb9dedb-kube-api-access-c4wlt\") pod \"certified-operators-s9x5s\" (UID: \"280c1227-51da-4ce3-a43b-08a0cbb9dedb\") " pod="openshift-marketplace/certified-operators-s9x5s"
Nov 25 09:57:25 crc kubenswrapper[4769]: I1125 09:57:25.925064 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/280c1227-51da-4ce3-a43b-08a0cbb9dedb-utilities\") pod \"certified-operators-s9x5s\" (UID: \"280c1227-51da-4ce3-a43b-08a0cbb9dedb\") " pod="openshift-marketplace/certified-operators-s9x5s"
Nov 25 09:57:25 crc kubenswrapper[4769]: I1125 09:57:25.925215 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/280c1227-51da-4ce3-a43b-08a0cbb9dedb-catalog-content\") pod \"certified-operators-s9x5s\" (UID: \"280c1227-51da-4ce3-a43b-08a0cbb9dedb\") " pod="openshift-marketplace/certified-operators-s9x5s"
Nov 25 09:57:25 crc kubenswrapper[4769]: I1125 09:57:25.961554 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c4wlt\" (UniqueName: \"kubernetes.io/projected/280c1227-51da-4ce3-a43b-08a0cbb9dedb-kube-api-access-c4wlt\") pod \"certified-operators-s9x5s\" (UID: \"280c1227-51da-4ce3-a43b-08a0cbb9dedb\") " pod="openshift-marketplace/certified-operators-s9x5s"
Nov 25 09:57:26 crc kubenswrapper[4769]: I1125 09:57:26.110096 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-s9x5s"
Nov 25 09:57:27 crc kubenswrapper[4769]: I1125 09:57:27.527770 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-s9x5s"]
Nov 25 09:57:27 crc kubenswrapper[4769]: W1125 09:57:27.533193 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod280c1227_51da_4ce3_a43b_08a0cbb9dedb.slice/crio-e50510da82cced00c5540cc9bbeab6373536da9fc177e41c2a3fd9926e5a0d62 WatchSource:0}: Error finding container e50510da82cced00c5540cc9bbeab6373536da9fc177e41c2a3fd9926e5a0d62: Status 404 returned error can't find the container with id e50510da82cced00c5540cc9bbeab6373536da9fc177e41c2a3fd9926e5a0d62
Nov 25 09:57:27 crc kubenswrapper[4769]: I1125 09:57:27.846540 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-vdrhz" event={"ID":"8f616f7b-7627-4878-9279-6d12b8ac3bb7","Type":"ContainerStarted","Data":"786c4b5a9bf30d29212a63a3b1cfcbaaeb9ed9efdee88ee50c895c6711ed3c88"}
Nov 25 09:57:27 crc kubenswrapper[4769]: I1125 09:57:27.847094 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-vdrhz"
Nov 25 09:57:27 crc kubenswrapper[4769]: I1125 09:57:27.849849 4769 generic.go:334] "Generic (PLEG): container finished" podID="280c1227-51da-4ce3-a43b-08a0cbb9dedb" containerID="3ea9963fed5af9e1a5a5f42939131f2ac951def1197925af19d6ae4be1ebc2a8" exitCode=0
Nov 25 09:57:27 crc kubenswrapper[4769]: I1125 09:57:27.849933 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s9x5s" event={"ID":"280c1227-51da-4ce3-a43b-08a0cbb9dedb","Type":"ContainerDied","Data":"3ea9963fed5af9e1a5a5f42939131f2ac951def1197925af19d6ae4be1ebc2a8"}
Nov 25 09:57:27 crc kubenswrapper[4769]: I1125 09:57:27.849986 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s9x5s" event={"ID":"280c1227-51da-4ce3-a43b-08a0cbb9dedb","Type":"ContainerStarted","Data":"e50510da82cced00c5540cc9bbeab6373536da9fc177e41c2a3fd9926e5a0d62"}
Nov 25 09:57:27 crc kubenswrapper[4769]: I1125 09:57:27.851757 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-5bdd8fd454-8hzw5" event={"ID":"daa191b3-4057-42fd-8c0c-d0aa065af77b","Type":"ContainerStarted","Data":"676ab03a01dfe7de0342c3dac03fd163fea3d655b9338a55d8da8287dc6feeb7"}
Nov 25 09:57:27 crc kubenswrapper[4769]: I1125 09:57:27.853430 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-distributor-76cc67bf56-g29kd" event={"ID":"163db859-0d28-48d8-b06a-f6a94e19479d","Type":"ContainerStarted","Data":"9bf35c06f267efc1942f7418021bb223513c2d496cb2ab1c97f768c2e0dc4892"}
Nov 25 09:57:27 crc kubenswrapper[4769]: I1125 09:57:27.853523 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-distributor-76cc67bf56-g29kd"
Nov 25 09:57:27 crc kubenswrapper[4769]: I1125 09:57:27.856338 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-compactor-0" event={"ID":"a4405372-5512-4a21-9e58-569bdcd4389c","Type":"ContainerStarted","Data":"b212fd4582e5efb91d31a8adcda4a57d096d29b0a9e050bb9407d18d0257709d"}
Nov 25 09:57:27 crc kubenswrapper[4769]: I1125 09:57:27.857153 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-compactor-0"
Nov 25 09:57:27 crc kubenswrapper[4769]: I1125 09:57:27.859877 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-5bdd8fd454-klwtc" event={"ID":"f535e254-5602-4794-9f47-e9bb2c1454b2","Type":"ContainerStarted","Data":"3afda134c4e523230565b78a395d1e42995fc22619e71dbaa43563d23534020e"}
Nov 25 09:57:27 crc kubenswrapper[4769]: I1125 09:57:27.861747 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-index-gateway-0" event={"ID":"ba6e5673-c7e8-4242-b24e-85603e10e8ac","Type":"ContainerStarted","Data":"112cb996f3b8ed8bb5622949373b5526764a81af97ec459a3f819a6d3afdd862"}
Nov 25 09:57:27 crc kubenswrapper[4769]: I1125 09:57:27.861888 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-index-gateway-0"
Nov 25 09:57:27 crc kubenswrapper[4769]: I1125 09:57:27.867467 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-querier-5895d59bb8-tbhrj" event={"ID":"6091a51e-9c46-48cd-bb3a-ff1f3c9aa965","Type":"ContainerStarted","Data":"b731424ca7907d29804df21fed427cfcfeb43cd1a476ac78d4a5af4d46f1440e"}
Nov 25 09:57:27 crc kubenswrapper[4769]: I1125 09:57:27.867546 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-querier-5895d59bb8-tbhrj"
Nov 25 09:57:27 crc kubenswrapper[4769]: I1125 09:57:27.872525 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-ingester-0" event={"ID":"853f39bc-058d-47a2-82f1-827f485f11a5","Type":"ContainerStarted","Data":"cdc6f341def709e58528b1e4499f3b4e7f54306fd8c61f23b4c327fb895d5a39"}
Nov 25 09:57:27 crc kubenswrapper[4769]: I1125 09:57:27.872726 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-ingester-0"
Nov 25 09:57:27 crc kubenswrapper[4769]: I1125 09:57:27.873044 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-vdrhz" podStartSLOduration=2.422675951 podStartE2EDuration="7.873019246s" podCreationTimestamp="2025-11-25 09:57:20 +0000 UTC" firstStartedPulling="2025-11-25 09:57:21.651821048 +0000 UTC m=+790.236793361" lastFinishedPulling="2025-11-25 09:57:27.102164343 +0000 UTC m=+795.687136656" observedRunningTime="2025-11-25 09:57:27.868497757 +0000 UTC m=+796.453470070" watchObservedRunningTime="2025-11-25 09:57:27.873019246 +0000 UTC m=+796.457991569"
Nov 25 09:57:27 crc kubenswrapper[4769]: I1125 09:57:27.910598 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-querier-5895d59bb8-tbhrj" podStartSLOduration=2.265038548 podStartE2EDuration="7.910571124s" podCreationTimestamp="2025-11-25 09:57:20 +0000 UTC" firstStartedPulling="2025-11-25 09:57:21.474792596 +0000 UTC m=+790.059764909" lastFinishedPulling="2025-11-25 09:57:27.120325152 +0000 UTC m=+795.705297485" observedRunningTime="2025-11-25 09:57:27.909175351 +0000 UTC m=+796.494147664" watchObservedRunningTime="2025-11-25 09:57:27.910571124 +0000 UTC m=+796.495543437"
Nov 25 09:57:27 crc kubenswrapper[4769]: I1125 09:57:27.944168 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-compactor-0" podStartSLOduration=3.178802438 podStartE2EDuration="7.944141836s" podCreationTimestamp="2025-11-25 09:57:20 +0000 UTC" firstStartedPulling="2025-11-25 09:57:22.323325348 +0000 UTC m=+790.908297661" lastFinishedPulling="2025-11-25 09:57:27.088664726 +0000 UTC m=+795.673637059" observedRunningTime="2025-11-25 09:57:27.939311409 +0000 UTC m=+796.524283732" watchObservedRunningTime="2025-11-25 09:57:27.944141836 +0000 UTC m=+796.529114149"
Nov 25 09:57:27 crc kubenswrapper[4769]: I1125 09:57:27.981997 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-distributor-76cc67bf56-g29kd" podStartSLOduration=2.220066501 podStartE2EDuration="7.981963581s" podCreationTimestamp="2025-11-25 09:57:20 +0000 UTC" firstStartedPulling="2025-11-25 09:57:21.291617436 +0000 UTC m=+789.876589749" lastFinishedPulling="2025-11-25 09:57:27.053514516 +0000 UTC m=+795.638486829" observedRunningTime="2025-11-25 09:57:27.970898203 +0000 UTC m=+796.555870516" watchObservedRunningTime="2025-11-25 09:57:27.981963581 +0000 UTC m=+796.566935894"
Nov 25 09:57:27 crc kubenswrapper[4769]: I1125 09:57:27.999433 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-index-gateway-0" podStartSLOduration=3.340593091 podStartE2EDuration="7.999412903s" podCreationTimestamp="2025-11-25 09:57:20 +0000 UTC" firstStartedPulling="2025-11-25 09:57:22.443474754 +0000 UTC m=+791.028447067" lastFinishedPulling="2025-11-25 09:57:27.102294566 +0000 UTC m=+795.687266879" observedRunningTime="2025-11-25 09:57:27.996718178 +0000 UTC m=+796.581690491" watchObservedRunningTime="2025-11-25 09:57:27.999412903 +0000 UTC m=+796.584385216"
Nov 25 09:57:28 crc kubenswrapper[4769]: I1125 09:57:28.036814 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-ingester-0" podStartSLOduration=3.410757658 podStartE2EDuration="8.036794197s" podCreationTimestamp="2025-11-25 09:57:20 +0000 UTC" firstStartedPulling="2025-11-25 09:57:22.529399262 +0000 UTC m=+791.114371575" lastFinishedPulling="2025-11-25 09:57:27.155435801 +0000 UTC m=+795.740408114" observedRunningTime="2025-11-25 09:57:28.035438174 +0000 UTC m=+796.620410517" watchObservedRunningTime="2025-11-25 09:57:28.036794197 +0000 UTC m=+796.621766510"
Nov 25 09:57:28 crc kubenswrapper[4769]: I1125 09:57:28.156321 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-gdskr"]
Nov 25 09:57:28 crc kubenswrapper[4769]: I1125 09:57:28.156643 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-gdskr" podUID="00b17462-7c52-49bb-8839-99a88b04a050" containerName="registry-server" containerID="cri-o://8e1a0764a9e6f16a2ad2a167124bdc211e3eae11bf72d475f5dd3fd0c6c4049e" gracePeriod=2
Nov 25 09:57:29 crc kubenswrapper[4769]: I1125 09:57:29.724089 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gdskr"
Nov 25 09:57:29 crc kubenswrapper[4769]: I1125 09:57:29.818537 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00b17462-7c52-49bb-8839-99a88b04a050-utilities\") pod \"00b17462-7c52-49bb-8839-99a88b04a050\" (UID: \"00b17462-7c52-49bb-8839-99a88b04a050\") "
Nov 25 09:57:29 crc kubenswrapper[4769]: I1125 09:57:29.818604 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00b17462-7c52-49bb-8839-99a88b04a050-catalog-content\") pod \"00b17462-7c52-49bb-8839-99a88b04a050\" (UID: \"00b17462-7c52-49bb-8839-99a88b04a050\") "
Nov 25 09:57:29 crc kubenswrapper[4769]: I1125 09:57:29.818717 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tl82t\" (UniqueName: \"kubernetes.io/projected/00b17462-7c52-49bb-8839-99a88b04a050-kube-api-access-tl82t\") pod \"00b17462-7c52-49bb-8839-99a88b04a050\" (UID: \"00b17462-7c52-49bb-8839-99a88b04a050\") "
Nov 25 09:57:29 crc kubenswrapper[4769]: I1125 09:57:29.820273 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/00b17462-7c52-49bb-8839-99a88b04a050-utilities" (OuterVolumeSpecName: "utilities") pod "00b17462-7c52-49bb-8839-99a88b04a050" (UID: "00b17462-7c52-49bb-8839-99a88b04a050"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:57:29 crc kubenswrapper[4769]: I1125 09:57:29.826698 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00b17462-7c52-49bb-8839-99a88b04a050-kube-api-access-tl82t" (OuterVolumeSpecName: "kube-api-access-tl82t") pod "00b17462-7c52-49bb-8839-99a88b04a050" (UID: "00b17462-7c52-49bb-8839-99a88b04a050"). InnerVolumeSpecName "kube-api-access-tl82t". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:57:29 crc kubenswrapper[4769]: I1125 09:57:29.893319 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/00b17462-7c52-49bb-8839-99a88b04a050-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "00b17462-7c52-49bb-8839-99a88b04a050" (UID: "00b17462-7c52-49bb-8839-99a88b04a050"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:57:29 crc kubenswrapper[4769]: I1125 09:57:29.896358 4769 generic.go:334] "Generic (PLEG): container finished" podID="00b17462-7c52-49bb-8839-99a88b04a050" containerID="8e1a0764a9e6f16a2ad2a167124bdc211e3eae11bf72d475f5dd3fd0c6c4049e" exitCode=0
Nov 25 09:57:29 crc kubenswrapper[4769]: I1125 09:57:29.896449 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gdskr" event={"ID":"00b17462-7c52-49bb-8839-99a88b04a050","Type":"ContainerDied","Data":"8e1a0764a9e6f16a2ad2a167124bdc211e3eae11bf72d475f5dd3fd0c6c4049e"}
Nov 25 09:57:29 crc kubenswrapper[4769]: I1125 09:57:29.896484 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gdskr" event={"ID":"00b17462-7c52-49bb-8839-99a88b04a050","Type":"ContainerDied","Data":"a47c566279bcd87a6b52c6ad9b4a7907e93e8e77a86e3d58a6825515a87ac6b9"}
Nov 25 09:57:29 crc kubenswrapper[4769]: I1125 09:57:29.896508 4769 scope.go:117] "RemoveContainer" containerID="8e1a0764a9e6f16a2ad2a167124bdc211e3eae11bf72d475f5dd3fd0c6c4049e"
Nov 25 09:57:29 crc kubenswrapper[4769]: I1125 09:57:29.896519 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gdskr"
Nov 25 09:57:29 crc kubenswrapper[4769]: I1125 09:57:29.899523 4769 generic.go:334] "Generic (PLEG): container finished" podID="280c1227-51da-4ce3-a43b-08a0cbb9dedb" containerID="360a4c3786e9d5aa34cd3e9901cffc8e5285760c8dc2d1c3bd1ff7cedaf460a2" exitCode=0
Nov 25 09:57:29 crc kubenswrapper[4769]: I1125 09:57:29.900041 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s9x5s" event={"ID":"280c1227-51da-4ce3-a43b-08a0cbb9dedb","Type":"ContainerDied","Data":"360a4c3786e9d5aa34cd3e9901cffc8e5285760c8dc2d1c3bd1ff7cedaf460a2"}
Nov 25 09:57:29 crc kubenswrapper[4769]: I1125 09:57:29.921577 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tl82t\" (UniqueName: \"kubernetes.io/projected/00b17462-7c52-49bb-8839-99a88b04a050-kube-api-access-tl82t\") on node \"crc\" DevicePath \"\""
Nov 25 09:57:29 crc kubenswrapper[4769]: I1125 09:57:29.921616 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00b17462-7c52-49bb-8839-99a88b04a050-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 09:57:29 crc kubenswrapper[4769]: I1125 09:57:29.921631 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00b17462-7c52-49bb-8839-99a88b04a050-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 09:57:29 crc kubenswrapper[4769]: I1125 09:57:29.928403 4769 scope.go:117] "RemoveContainer" containerID="db13d32c6a59d4766deb65b8cb1037f68eb4ed8300fcb38385ee46341609f738"
Nov 25 09:57:29 crc kubenswrapper[4769]: I1125 09:57:29.944919 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-gdskr"]
Nov 25 09:57:29 crc kubenswrapper[4769]: I1125 09:57:29.951378 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-gdskr"]
Nov 25 09:57:29 crc kubenswrapper[4769]: I1125 09:57:29.975312 4769 scope.go:117] "RemoveContainer" containerID="84140c6f223e9a000dcdb5a06ac02f1cc135c404bfb5c52c194631acd352addf"
Nov 25 09:57:30 crc kubenswrapper[4769]: I1125 09:57:30.003854 4769 scope.go:117] "RemoveContainer" containerID="8e1a0764a9e6f16a2ad2a167124bdc211e3eae11bf72d475f5dd3fd0c6c4049e"
Nov 25 09:57:30 crc kubenswrapper[4769]: E1125 09:57:30.005154 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8e1a0764a9e6f16a2ad2a167124bdc211e3eae11bf72d475f5dd3fd0c6c4049e\": container with ID starting with 8e1a0764a9e6f16a2ad2a167124bdc211e3eae11bf72d475f5dd3fd0c6c4049e not found: ID does not exist" containerID="8e1a0764a9e6f16a2ad2a167124bdc211e3eae11bf72d475f5dd3fd0c6c4049e"
Nov 25 09:57:30 crc kubenswrapper[4769]: I1125 09:57:30.005211 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e1a0764a9e6f16a2ad2a167124bdc211e3eae11bf72d475f5dd3fd0c6c4049e"} err="failed to get container status \"8e1a0764a9e6f16a2ad2a167124bdc211e3eae11bf72d475f5dd3fd0c6c4049e\": rpc error: code = NotFound desc = could not find container \"8e1a0764a9e6f16a2ad2a167124bdc211e3eae11bf72d475f5dd3fd0c6c4049e\": container with ID starting with 8e1a0764a9e6f16a2ad2a167124bdc211e3eae11bf72d475f5dd3fd0c6c4049e not found: ID does not exist"
Nov 25 09:57:30 crc kubenswrapper[4769]: I1125 09:57:30.005254 4769 scope.go:117] "RemoveContainer" containerID="db13d32c6a59d4766deb65b8cb1037f68eb4ed8300fcb38385ee46341609f738"
Nov 25 09:57:30 crc kubenswrapper[4769]: E1125 09:57:30.005941 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"db13d32c6a59d4766deb65b8cb1037f68eb4ed8300fcb38385ee46341609f738\": container with ID starting with db13d32c6a59d4766deb65b8cb1037f68eb4ed8300fcb38385ee46341609f738 not found: ID does not exist" containerID="db13d32c6a59d4766deb65b8cb1037f68eb4ed8300fcb38385ee46341609f738"
Nov 25 09:57:30 crc kubenswrapper[4769]: I1125 09:57:30.006060 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"db13d32c6a59d4766deb65b8cb1037f68eb4ed8300fcb38385ee46341609f738"} err="failed to get container status \"db13d32c6a59d4766deb65b8cb1037f68eb4ed8300fcb38385ee46341609f738\": rpc error: code = NotFound desc = could not find container \"db13d32c6a59d4766deb65b8cb1037f68eb4ed8300fcb38385ee46341609f738\": container with ID starting with db13d32c6a59d4766deb65b8cb1037f68eb4ed8300fcb38385ee46341609f738 not found: ID does not exist"
Nov 25 09:57:30 crc kubenswrapper[4769]: I1125 09:57:30.006081 4769 scope.go:117] "RemoveContainer" containerID="84140c6f223e9a000dcdb5a06ac02f1cc135c404bfb5c52c194631acd352addf"
Nov 25 09:57:30 crc kubenswrapper[4769]: E1125 09:57:30.006628 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"84140c6f223e9a000dcdb5a06ac02f1cc135c404bfb5c52c194631acd352addf\": container with ID starting with 84140c6f223e9a000dcdb5a06ac02f1cc135c404bfb5c52c194631acd352addf not found: ID does not exist" containerID="84140c6f223e9a000dcdb5a06ac02f1cc135c404bfb5c52c194631acd352addf"
Nov 25 09:57:30 crc kubenswrapper[4769]: I1125 09:57:30.006689 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"84140c6f223e9a000dcdb5a06ac02f1cc135c404bfb5c52c194631acd352addf"} err="failed to get container status \"84140c6f223e9a000dcdb5a06ac02f1cc135c404bfb5c52c194631acd352addf\": rpc error: code = NotFound desc = could not find container \"84140c6f223e9a000dcdb5a06ac02f1cc135c404bfb5c52c194631acd352addf\": container with ID starting with 84140c6f223e9a000dcdb5a06ac02f1cc135c404bfb5c52c194631acd352addf not found: ID does not exist"
Nov 25 09:57:30 crc kubenswrapper[4769]: I1125 09:57:30.250637 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="00b17462-7c52-49bb-8839-99a88b04a050" path="/var/lib/kubelet/pods/00b17462-7c52-49bb-8839-99a88b04a050/volumes"
Nov 25 09:57:34 crc kubenswrapper[4769]: I1125 09:57:34.943939 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s9x5s" event={"ID":"280c1227-51da-4ce3-a43b-08a0cbb9dedb","Type":"ContainerStarted","Data":"1152d598d14ddc3b76be4ca470f1500bf5a596b2c193b1032f55574f9f2e1cbc"}
Nov 25 09:57:34 crc kubenswrapper[4769]: I1125 09:57:34.947091 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-5bdd8fd454-klwtc" event={"ID":"f535e254-5602-4794-9f47-e9bb2c1454b2","Type":"ContainerStarted","Data":"ac45169350e93f3d0e4c4cd4e4ccf47880f75b0b435870e1895eee2865f2933d"}
Nov 25 09:57:34 crc kubenswrapper[4769]: I1125 09:57:34.948219 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-5bdd8fd454-klwtc"
Nov 25 09:57:34 crc kubenswrapper[4769]: I1125 09:57:34.948244 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-5bdd8fd454-klwtc"
Nov 25 09:57:34 crc kubenswrapper[4769]: I1125 09:57:34.953298 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-5bdd8fd454-8hzw5" event={"ID":"daa191b3-4057-42fd-8c0c-d0aa065af77b","Type":"ContainerStarted","Data":"af25c5703e199c96927b17e6cb40181d5bfcd108af0aec256064d343df78d26a"}
Nov 25 09:57:34 crc kubenswrapper[4769]: I1125 09:57:34.956252 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-5bdd8fd454-8hzw5"
Nov 25 09:57:34 crc kubenswrapper[4769]: I1125 09:57:34.956318 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-5bdd8fd454-8hzw5"
Nov 25 09:57:34 crc kubenswrapper[4769]: I1125 09:57:34.961822 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-5bdd8fd454-klwtc"
Nov 25 09:57:34 crc kubenswrapper[4769]: I1125 09:57:34.966776 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-5bdd8fd454-klwtc"
Nov 25 09:57:34 crc kubenswrapper[4769]: I1125 09:57:34.972678 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-s9x5s" podStartSLOduration=3.234752645 podStartE2EDuration="9.97265325s" podCreationTimestamp="2025-11-25 09:57:25 +0000 UTC" firstStartedPulling="2025-11-25 09:57:27.853882693 +0000 UTC m=+796.438855026" lastFinishedPulling="2025-11-25 09:57:34.591783318 +0000 UTC m=+803.176755631" observedRunningTime="2025-11-25 09:57:34.966985923 +0000 UTC m=+803.551958256" watchObservedRunningTime="2025-11-25 09:57:34.97265325 +0000 UTC m=+803.557625563"
Nov 25 09:57:34 crc kubenswrapper[4769]: I1125 09:57:34.976931 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-5bdd8fd454-8hzw5"
Nov 25 09:57:34 crc kubenswrapper[4769]: I1125 09:57:34.979317 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-5bdd8fd454-8hzw5"
Nov 25 09:57:34 crc kubenswrapper[4769]: I1125 09:57:34.993936 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-gateway-5bdd8fd454-klwtc" podStartSLOduration=2.603100925 podStartE2EDuration="14.993901994s" podCreationTimestamp="2025-11-25 09:57:20 +0000 UTC" firstStartedPulling="2025-11-25 09:57:22.218491163 +0000 UTC m=+790.803463476" lastFinishedPulling="2025-11-25 09:57:34.609292232 +0000 UTC m=+803.194264545" observedRunningTime="2025-11-25 09:57:34.989392655 +0000 UTC m=+803.574364978" watchObservedRunningTime="2025-11-25 09:57:34.993901994 +0000 UTC m=+803.578874307"
Nov 25 09:57:35 crc kubenswrapper[4769]: I1125 09:57:35.018715 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-gateway-5bdd8fd454-8hzw5" podStartSLOduration=2.489040466 podStartE2EDuration="15.018688193s" podCreationTimestamp="2025-11-25 09:57:20 +0000 UTC" firstStartedPulling="2025-11-25 09:57:22.095892878 +0000 UTC m=+790.680865191" lastFinishedPulling="2025-11-25 09:57:34.625540615 +0000 UTC m=+803.210512918" observedRunningTime="2025-11-25 09:57:35.015138657 +0000 UTC m=+803.600110970" watchObservedRunningTime="2025-11-25 09:57:35.018688193 +0000 UTC m=+803.603660506"
Nov 25 09:57:36 crc kubenswrapper[4769]: I1125 09:57:36.110950 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-s9x5s"
Nov 25 09:57:36 crc kubenswrapper[4769]: I1125 09:57:36.111780 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-s9x5s"
Nov 25 09:57:37 crc kubenswrapper[4769]: I1125 09:57:37.161556 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-s9x5s" podUID="280c1227-51da-4ce3-a43b-08a0cbb9dedb" containerName="registry-server" probeResult="failure" output=<
Nov 25 09:57:37 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 09:57:37 crc kubenswrapper[4769]: >
Nov 25 09:57:46 crc kubenswrapper[4769]: I1125 09:57:46.174426 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-s9x5s"
Nov 25 09:57:46 crc kubenswrapper[4769]: I1125 09:57:46.228898 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-s9x5s"
Nov 25 09:57:46 crc kubenswrapper[4769]: I1125 09:57:46.416300 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-s9x5s"]
Nov 25 09:57:48 crc kubenswrapper[4769]: I1125 09:57:48.061549 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-s9x5s" podUID="280c1227-51da-4ce3-a43b-08a0cbb9dedb" containerName="registry-server" containerID="cri-o://1152d598d14ddc3b76be4ca470f1500bf5a596b2c193b1032f55574f9f2e1cbc" gracePeriod=2
Nov 25 09:57:48 crc kubenswrapper[4769]: E1125 09:57:48.188497 4769 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod280c1227_51da_4ce3_a43b_08a0cbb9dedb.slice/crio-conmon-1152d598d14ddc3b76be4ca470f1500bf5a596b2c193b1032f55574f9f2e1cbc.scope\": RecentStats: unable to find data in memory cache]"
Nov 25 09:57:48 crc kubenswrapper[4769]: E1125 09:57:48.188810 4769 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod280c1227_51da_4ce3_a43b_08a0cbb9dedb.slice/crio-conmon-1152d598d14ddc3b76be4ca470f1500bf5a596b2c193b1032f55574f9f2e1cbc.scope\": RecentStats: unable to find data in memory cache]"
Nov 25 09:57:48 crc kubenswrapper[4769]: I1125 09:57:48.600757 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-s9x5s"
Nov 25 09:57:48 crc kubenswrapper[4769]: I1125 09:57:48.702272 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c4wlt\" (UniqueName: \"kubernetes.io/projected/280c1227-51da-4ce3-a43b-08a0cbb9dedb-kube-api-access-c4wlt\") pod \"280c1227-51da-4ce3-a43b-08a0cbb9dedb\" (UID: \"280c1227-51da-4ce3-a43b-08a0cbb9dedb\") "
Nov 25 09:57:48 crc kubenswrapper[4769]: I1125 09:57:48.702438 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/280c1227-51da-4ce3-a43b-08a0cbb9dedb-utilities\") pod \"280c1227-51da-4ce3-a43b-08a0cbb9dedb\" (UID: \"280c1227-51da-4ce3-a43b-08a0cbb9dedb\") "
Nov 25 09:57:48 crc kubenswrapper[4769]: I1125 09:57:48.702483 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/280c1227-51da-4ce3-a43b-08a0cbb9dedb-catalog-content\") pod \"280c1227-51da-4ce3-a43b-08a0cbb9dedb\" (UID: \"280c1227-51da-4ce3-a43b-08a0cbb9dedb\") "
Nov 25 09:57:48 crc kubenswrapper[4769]: I1125 09:57:48.704174 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/280c1227-51da-4ce3-a43b-08a0cbb9dedb-utilities" (OuterVolumeSpecName: "utilities") pod "280c1227-51da-4ce3-a43b-08a0cbb9dedb" (UID: "280c1227-51da-4ce3-a43b-08a0cbb9dedb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:57:48 crc kubenswrapper[4769]: I1125 09:57:48.709107 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/280c1227-51da-4ce3-a43b-08a0cbb9dedb-kube-api-access-c4wlt" (OuterVolumeSpecName: "kube-api-access-c4wlt") pod "280c1227-51da-4ce3-a43b-08a0cbb9dedb" (UID: "280c1227-51da-4ce3-a43b-08a0cbb9dedb"). InnerVolumeSpecName "kube-api-access-c4wlt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:57:48 crc kubenswrapper[4769]: I1125 09:57:48.752117 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/280c1227-51da-4ce3-a43b-08a0cbb9dedb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "280c1227-51da-4ce3-a43b-08a0cbb9dedb" (UID: "280c1227-51da-4ce3-a43b-08a0cbb9dedb"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:57:48 crc kubenswrapper[4769]: I1125 09:57:48.804411 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/280c1227-51da-4ce3-a43b-08a0cbb9dedb-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 09:57:48 crc kubenswrapper[4769]: I1125 09:57:48.804458 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/280c1227-51da-4ce3-a43b-08a0cbb9dedb-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 09:57:48 crc kubenswrapper[4769]: I1125 09:57:48.804476 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c4wlt\" (UniqueName: \"kubernetes.io/projected/280c1227-51da-4ce3-a43b-08a0cbb9dedb-kube-api-access-c4wlt\") on node \"crc\" DevicePath \"\""
Nov 25 09:57:49 crc kubenswrapper[4769]: I1125 09:57:49.070499 4769 generic.go:334] "Generic (PLEG): container finished" podID="280c1227-51da-4ce3-a43b-08a0cbb9dedb" containerID="1152d598d14ddc3b76be4ca470f1500bf5a596b2c193b1032f55574f9f2e1cbc" exitCode=0
Nov 25 09:57:49 crc kubenswrapper[4769]: I1125 09:57:49.071487 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s9x5s" event={"ID":"280c1227-51da-4ce3-a43b-08a0cbb9dedb","Type":"ContainerDied","Data":"1152d598d14ddc3b76be4ca470f1500bf5a596b2c193b1032f55574f9f2e1cbc"}
Nov 25 09:57:49 crc kubenswrapper[4769]: I1125 09:57:49.071573 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s9x5s" event={"ID":"280c1227-51da-4ce3-a43b-08a0cbb9dedb","Type":"ContainerDied","Data":"e50510da82cced00c5540cc9bbeab6373536da9fc177e41c2a3fd9926e5a0d62"}
Nov 25 09:57:49 crc kubenswrapper[4769]: I1125 09:57:49.071641 4769 scope.go:117] "RemoveContainer" containerID="1152d598d14ddc3b76be4ca470f1500bf5a596b2c193b1032f55574f9f2e1cbc"
Nov 25 09:57:49 crc kubenswrapper[4769]: I1125 09:57:49.071848 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-s9x5s"
Nov 25 09:57:49 crc kubenswrapper[4769]: I1125 09:57:49.111820 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-s9x5s"]
Nov 25 09:57:49 crc kubenswrapper[4769]: I1125 09:57:49.118397 4769 scope.go:117] "RemoveContainer" containerID="360a4c3786e9d5aa34cd3e9901cffc8e5285760c8dc2d1c3bd1ff7cedaf460a2"
Nov 25 09:57:49 crc kubenswrapper[4769]: I1125 09:57:49.132841 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-s9x5s"]
Nov 25 09:57:49 crc kubenswrapper[4769]: I1125 09:57:49.145141 4769 scope.go:117] "RemoveContainer" containerID="3ea9963fed5af9e1a5a5f42939131f2ac951def1197925af19d6ae4be1ebc2a8"
Nov 25 09:57:49 crc kubenswrapper[4769]: I1125 09:57:49.167510 4769 scope.go:117] "RemoveContainer" containerID="1152d598d14ddc3b76be4ca470f1500bf5a596b2c193b1032f55574f9f2e1cbc"
Nov 25 09:57:49 crc kubenswrapper[4769]: E1125 09:57:49.168063 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1152d598d14ddc3b76be4ca470f1500bf5a596b2c193b1032f55574f9f2e1cbc\": container with ID starting with 1152d598d14ddc3b76be4ca470f1500bf5a596b2c193b1032f55574f9f2e1cbc not found: ID does not exist" containerID="1152d598d14ddc3b76be4ca470f1500bf5a596b2c193b1032f55574f9f2e1cbc"
Nov 25 09:57:49 crc kubenswrapper[4769]: I1125 09:57:49.168101 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1152d598d14ddc3b76be4ca470f1500bf5a596b2c193b1032f55574f9f2e1cbc"} err="failed to get container status \"1152d598d14ddc3b76be4ca470f1500bf5a596b2c193b1032f55574f9f2e1cbc\": rpc error: code = NotFound desc = could not find container \"1152d598d14ddc3b76be4ca470f1500bf5a596b2c193b1032f55574f9f2e1cbc\": container with ID starting with 1152d598d14ddc3b76be4ca470f1500bf5a596b2c193b1032f55574f9f2e1cbc not found: ID does not exist"
Nov 25 09:57:49 crc kubenswrapper[4769]: I1125 09:57:49.168128 4769 scope.go:117] "RemoveContainer" containerID="360a4c3786e9d5aa34cd3e9901cffc8e5285760c8dc2d1c3bd1ff7cedaf460a2"
Nov 25 09:57:49 crc kubenswrapper[4769]: E1125 09:57:49.168357 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"360a4c3786e9d5aa34cd3e9901cffc8e5285760c8dc2d1c3bd1ff7cedaf460a2\": container with ID starting with 360a4c3786e9d5aa34cd3e9901cffc8e5285760c8dc2d1c3bd1ff7cedaf460a2 not found: ID does not exist" containerID="360a4c3786e9d5aa34cd3e9901cffc8e5285760c8dc2d1c3bd1ff7cedaf460a2"
Nov 25 09:57:49 crc kubenswrapper[4769]: I1125 09:57:49.168380 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"360a4c3786e9d5aa34cd3e9901cffc8e5285760c8dc2d1c3bd1ff7cedaf460a2"} err="failed to get container status \"360a4c3786e9d5aa34cd3e9901cffc8e5285760c8dc2d1c3bd1ff7cedaf460a2\": rpc error: code = NotFound desc = could not find container \"360a4c3786e9d5aa34cd3e9901cffc8e5285760c8dc2d1c3bd1ff7cedaf460a2\": container with ID starting with 360a4c3786e9d5aa34cd3e9901cffc8e5285760c8dc2d1c3bd1ff7cedaf460a2 not found: ID does not exist"
Nov 25 09:57:49 crc kubenswrapper[4769]: I1125 09:57:49.168395 4769 scope.go:117] "RemoveContainer" containerID="3ea9963fed5af9e1a5a5f42939131f2ac951def1197925af19d6ae4be1ebc2a8"
Nov 25 09:57:49 crc kubenswrapper[4769]: E1125 09:57:49.168578 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3ea9963fed5af9e1a5a5f42939131f2ac951def1197925af19d6ae4be1ebc2a8\": container with ID starting with 3ea9963fed5af9e1a5a5f42939131f2ac951def1197925af19d6ae4be1ebc2a8 not found: ID does not exist" containerID="3ea9963fed5af9e1a5a5f42939131f2ac951def1197925af19d6ae4be1ebc2a8"
Nov 25 09:57:49 crc kubenswrapper[4769]: I1125 09:57:49.168599 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3ea9963fed5af9e1a5a5f42939131f2ac951def1197925af19d6ae4be1ebc2a8"} err="failed to get container status \"3ea9963fed5af9e1a5a5f42939131f2ac951def1197925af19d6ae4be1ebc2a8\": rpc error: code = NotFound desc = could not find container \"3ea9963fed5af9e1a5a5f42939131f2ac951def1197925af19d6ae4be1ebc2a8\": container with ID starting with 3ea9963fed5af9e1a5a5f42939131f2ac951def1197925af19d6ae4be1ebc2a8 not found: ID does not exist"
Nov 25 09:57:50 crc kubenswrapper[4769]: I1125 09:57:50.246928 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="280c1227-51da-4ce3-a43b-08a0cbb9dedb" path="/var/lib/kubelet/pods/280c1227-51da-4ce3-a43b-08a0cbb9dedb/volumes"
Nov 25 09:57:50 crc kubenswrapper[4769]: I1125 09:57:50.425060 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-distributor-76cc67bf56-g29kd"
Nov 25 09:57:50 crc kubenswrapper[4769]: I1125 09:57:50.690270 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-querier-5895d59bb8-tbhrj"
Nov 25 09:57:50 crc kubenswrapper[4769]: I1125 09:57:50.805844 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-vdrhz"
Nov 25 09:57:51 crc kubenswrapper[4769]: I1125 09:57:51.737562 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-compactor-0"
Nov 25 09:57:51 crc kubenswrapper[4769]: I1125 09:57:51.859308 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-index-gateway-0"
Nov 25 09:57:51 crc kubenswrapper[4769]: I1125 09:57:51.920276 4769 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: this instance owns no tokens
Nov 25 09:57:51 crc kubenswrapper[4769]: I1125 09:57:51.920676 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="853f39bc-058d-47a2-82f1-827f485f11a5" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503"
Nov 25 09:58:01 crc kubenswrapper[4769]: I1125 09:58:01.888692 4769 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: waiting for 15s after being ready
Nov 25 09:58:01 crc kubenswrapper[4769]: I1125 09:58:01.890170 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="853f39bc-058d-47a2-82f1-827f485f11a5" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503"
Nov 25 09:58:05 crc kubenswrapper[4769]: I1125 09:58:05.398598 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-6bvms"]
Nov 25 09:58:05 crc kubenswrapper[4769]: E1125 09:58:05.399547 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="280c1227-51da-4ce3-a43b-08a0cbb9dedb" containerName="registry-server"
Nov 25 09:58:05 crc kubenswrapper[4769]: I1125 09:58:05.399565 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="280c1227-51da-4ce3-a43b-08a0cbb9dedb" containerName="registry-server"
Nov 25 09:58:05 crc kubenswrapper[4769]: E1125 09:58:05.399582 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00b17462-7c52-49bb-8839-99a88b04a050" containerName="registry-server"
Nov 25 09:58:05 crc kubenswrapper[4769]: I1125 09:58:05.399590 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="00b17462-7c52-49bb-8839-99a88b04a050" containerName="registry-server"
Nov 25 09:58:05 crc kubenswrapper[4769]: E1125 09:58:05.399601 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="280c1227-51da-4ce3-a43b-08a0cbb9dedb" containerName="extract-content"
Nov 25 09:58:05 crc kubenswrapper[4769]: I1125 09:58:05.399609 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="280c1227-51da-4ce3-a43b-08a0cbb9dedb" containerName="extract-content"
Nov 25 09:58:05 crc kubenswrapper[4769]: E1125 09:58:05.399620 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00b17462-7c52-49bb-8839-99a88b04a050" containerName="extract-utilities"
Nov 25 09:58:05 crc kubenswrapper[4769]: I1125 09:58:05.399628 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="00b17462-7c52-49bb-8839-99a88b04a050" containerName="extract-utilities"
Nov 25 09:58:05 crc kubenswrapper[4769]: E1125 09:58:05.399641 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="280c1227-51da-4ce3-a43b-08a0cbb9dedb" containerName="extract-utilities"
Nov 25 09:58:05 crc kubenswrapper[4769]: I1125 09:58:05.399648 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="280c1227-51da-4ce3-a43b-08a0cbb9dedb" containerName="extract-utilities"
Nov 25 09:58:05 crc kubenswrapper[4769]: E1125 09:58:05.399673 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00b17462-7c52-49bb-8839-99a88b04a050" containerName="extract-content"
Nov 25 09:58:05 crc kubenswrapper[4769]: I1125 09:58:05.399681 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="00b17462-7c52-49bb-8839-99a88b04a050" containerName="extract-content"
Nov 25 09:58:05 crc kubenswrapper[4769]: I1125 09:58:05.399838 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="280c1227-51da-4ce3-a43b-08a0cbb9dedb" containerName="registry-server"
Nov 25 09:58:05 crc kubenswrapper[4769]: I1125 09:58:05.399851 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="00b17462-7c52-49bb-8839-99a88b04a050" containerName="registry-server"
Nov 25 09:58:05 crc kubenswrapper[4769]: I1125 09:58:05.401035 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6bvms"
Nov 25 09:58:05 crc kubenswrapper[4769]: I1125 09:58:05.416005 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6bvms"]
Nov 25 09:58:05 crc kubenswrapper[4769]: I1125 09:58:05.527675 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff0180d9-fa98-44d3-8398-48c95759fdb4-catalog-content\") pod \"redhat-marketplace-6bvms\" (UID: \"ff0180d9-fa98-44d3-8398-48c95759fdb4\") " pod="openshift-marketplace/redhat-marketplace-6bvms"
Nov 25 09:58:05 crc kubenswrapper[4769]: I1125 09:58:05.527763 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff0180d9-fa98-44d3-8398-48c95759fdb4-utilities\") pod \"redhat-marketplace-6bvms\" (UID: \"ff0180d9-fa98-44d3-8398-48c95759fdb4\") " pod="openshift-marketplace/redhat-marketplace-6bvms"
Nov 25 09:58:05 crc kubenswrapper[4769]: I1125 09:58:05.527915 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2ntpg\" (UniqueName: \"kubernetes.io/projected/ff0180d9-fa98-44d3-8398-48c95759fdb4-kube-api-access-2ntpg\") pod \"redhat-marketplace-6bvms\" (UID: \"ff0180d9-fa98-44d3-8398-48c95759fdb4\") " pod="openshift-marketplace/redhat-marketplace-6bvms"
Nov 25 09:58:05 crc kubenswrapper[4769]: I1125 09:58:05.630199 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff0180d9-fa98-44d3-8398-48c95759fdb4-catalog-content\") pod \"redhat-marketplace-6bvms\" (UID: \"ff0180d9-fa98-44d3-8398-48c95759fdb4\") " pod="openshift-marketplace/redhat-marketplace-6bvms"
Nov 25 09:58:05 crc kubenswrapper[4769]: I1125 09:58:05.630271 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff0180d9-fa98-44d3-8398-48c95759fdb4-utilities\") pod \"redhat-marketplace-6bvms\" (UID: \"ff0180d9-fa98-44d3-8398-48c95759fdb4\") " pod="openshift-marketplace/redhat-marketplace-6bvms"
Nov 25 09:58:05 crc kubenswrapper[4769]: I1125 09:58:05.630328 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2ntpg\" (UniqueName: \"kubernetes.io/projected/ff0180d9-fa98-44d3-8398-48c95759fdb4-kube-api-access-2ntpg\") pod \"redhat-marketplace-6bvms\" (UID: \"ff0180d9-fa98-44d3-8398-48c95759fdb4\") " pod="openshift-marketplace/redhat-marketplace-6bvms"
Nov 25 09:58:05 crc kubenswrapper[4769]: I1125 09:58:05.630740 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff0180d9-fa98-44d3-8398-48c95759fdb4-catalog-content\") pod \"redhat-marketplace-6bvms\" (UID: \"ff0180d9-fa98-44d3-8398-48c95759fdb4\") " pod="openshift-marketplace/redhat-marketplace-6bvms"
Nov 25 09:58:05 crc kubenswrapper[4769]: I1125 09:58:05.631206 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff0180d9-fa98-44d3-8398-48c95759fdb4-utilities\") pod \"redhat-marketplace-6bvms\" (UID: \"ff0180d9-fa98-44d3-8398-48c95759fdb4\") " pod="openshift-marketplace/redhat-marketplace-6bvms"
Nov 25 09:58:05 crc kubenswrapper[4769]: I1125 09:58:05.657654 4769 operation_generator.go:637] "MountVolume.SetUp
succeeded for volume \"kube-api-access-2ntpg\" (UniqueName: \"kubernetes.io/projected/ff0180d9-fa98-44d3-8398-48c95759fdb4-kube-api-access-2ntpg\") pod \"redhat-marketplace-6bvms\" (UID: \"ff0180d9-fa98-44d3-8398-48c95759fdb4\") " pod="openshift-marketplace/redhat-marketplace-6bvms" Nov 25 09:58:05 crc kubenswrapper[4769]: I1125 09:58:05.727317 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6bvms" Nov 25 09:58:06 crc kubenswrapper[4769]: I1125 09:58:06.195486 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6bvms"] Nov 25 09:58:06 crc kubenswrapper[4769]: W1125 09:58:06.214252 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podff0180d9_fa98_44d3_8398_48c95759fdb4.slice/crio-a9088d0215df0366547ee60daa6819669f9c6a85fb639a9c791656d4a40c2449 WatchSource:0}: Error finding container a9088d0215df0366547ee60daa6819669f9c6a85fb639a9c791656d4a40c2449: Status 404 returned error can't find the container with id a9088d0215df0366547ee60daa6819669f9c6a85fb639a9c791656d4a40c2449 Nov 25 09:58:06 crc kubenswrapper[4769]: I1125 09:58:06.222882 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6bvms" event={"ID":"ff0180d9-fa98-44d3-8398-48c95759fdb4","Type":"ContainerStarted","Data":"a9088d0215df0366547ee60daa6819669f9c6a85fb639a9c791656d4a40c2449"} Nov 25 09:58:07 crc kubenswrapper[4769]: I1125 09:58:07.233523 4769 generic.go:334] "Generic (PLEG): container finished" podID="ff0180d9-fa98-44d3-8398-48c95759fdb4" containerID="2e7ffb716b606f2f2351a166707af532a93486c209d60af3b49a89564de84bd6" exitCode=0 Nov 25 09:58:07 crc kubenswrapper[4769]: I1125 09:58:07.233580 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6bvms" event={"ID":"ff0180d9-fa98-44d3-8398-48c95759fdb4","Type":"ContainerDied","Data":"2e7ffb716b606f2f2351a166707af532a93486c209d60af3b49a89564de84bd6"} Nov 25 09:58:08 crc kubenswrapper[4769]: I1125 09:58:08.248497 4769 generic.go:334] "Generic (PLEG): container finished" podID="ff0180d9-fa98-44d3-8398-48c95759fdb4" containerID="2633580ac6b9e0e44109377f93632e448954585c7f31c9f055e254d7e8277631" exitCode=0 Nov 25 09:58:08 crc kubenswrapper[4769]: I1125 09:58:08.248595 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6bvms" event={"ID":"ff0180d9-fa98-44d3-8398-48c95759fdb4","Type":"ContainerDied","Data":"2633580ac6b9e0e44109377f93632e448954585c7f31c9f055e254d7e8277631"} Nov 25 09:58:09 crc kubenswrapper[4769]: I1125 09:58:09.261367 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6bvms" event={"ID":"ff0180d9-fa98-44d3-8398-48c95759fdb4","Type":"ContainerStarted","Data":"66961a973d01a33eddfc4dd010b9454f9cc16c505dd1d06b7e9483118ca30f77"} Nov 25 09:58:09 crc kubenswrapper[4769]: I1125 09:58:09.294018 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-6bvms" podStartSLOduration=2.789775424 podStartE2EDuration="4.293949513s" podCreationTimestamp="2025-11-25 09:58:05 +0000 UTC" firstStartedPulling="2025-11-25 09:58:07.23633394 +0000 UTC m=+835.821306253" lastFinishedPulling="2025-11-25 09:58:08.740508019 +0000 UTC m=+837.325480342" observedRunningTime="2025-11-25 09:58:09.285699313 +0000 UTC m=+837.870671636" 
watchObservedRunningTime="2025-11-25 09:58:09.293949513 +0000 UTC m=+837.878921876" Nov 25 09:58:11 crc kubenswrapper[4769]: I1125 09:58:11.887881 4769 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: waiting for 15s after being ready Nov 25 09:58:11 crc kubenswrapper[4769]: I1125 09:58:11.887987 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="853f39bc-058d-47a2-82f1-827f485f11a5" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 25 09:58:15 crc kubenswrapper[4769]: I1125 09:58:15.728318 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-6bvms" Nov 25 09:58:15 crc kubenswrapper[4769]: I1125 09:58:15.729016 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-6bvms" Nov 25 09:58:15 crc kubenswrapper[4769]: I1125 09:58:15.800867 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-6bvms" Nov 25 09:58:16 crc kubenswrapper[4769]: I1125 09:58:16.369144 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-6bvms" Nov 25 09:58:16 crc kubenswrapper[4769]: I1125 09:58:16.443553 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6bvms"] Nov 25 09:58:18 crc kubenswrapper[4769]: I1125 09:58:18.341387 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-6bvms" podUID="ff0180d9-fa98-44d3-8398-48c95759fdb4" containerName="registry-server" containerID="cri-o://66961a973d01a33eddfc4dd010b9454f9cc16c505dd1d06b7e9483118ca30f77" gracePeriod=2 Nov 25 09:58:18 crc kubenswrapper[4769]: I1125 09:58:18.804358 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6bvms" Nov 25 09:58:18 crc kubenswrapper[4769]: I1125 09:58:18.897187 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff0180d9-fa98-44d3-8398-48c95759fdb4-utilities\") pod \"ff0180d9-fa98-44d3-8398-48c95759fdb4\" (UID: \"ff0180d9-fa98-44d3-8398-48c95759fdb4\") " Nov 25 09:58:18 crc kubenswrapper[4769]: I1125 09:58:18.897414 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff0180d9-fa98-44d3-8398-48c95759fdb4-catalog-content\") pod \"ff0180d9-fa98-44d3-8398-48c95759fdb4\" (UID: \"ff0180d9-fa98-44d3-8398-48c95759fdb4\") " Nov 25 09:58:18 crc kubenswrapper[4769]: I1125 09:58:18.897559 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2ntpg\" (UniqueName: \"kubernetes.io/projected/ff0180d9-fa98-44d3-8398-48c95759fdb4-kube-api-access-2ntpg\") pod \"ff0180d9-fa98-44d3-8398-48c95759fdb4\" (UID: \"ff0180d9-fa98-44d3-8398-48c95759fdb4\") " Nov 25 09:58:18 crc kubenswrapper[4769]: I1125 09:58:18.899165 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ff0180d9-fa98-44d3-8398-48c95759fdb4-utilities" (OuterVolumeSpecName: "utilities") pod "ff0180d9-fa98-44d3-8398-48c95759fdb4" (UID: "ff0180d9-fa98-44d3-8398-48c95759fdb4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:58:18 crc kubenswrapper[4769]: I1125 09:58:18.905007 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff0180d9-fa98-44d3-8398-48c95759fdb4-kube-api-access-2ntpg" (OuterVolumeSpecName: "kube-api-access-2ntpg") pod "ff0180d9-fa98-44d3-8398-48c95759fdb4" (UID: "ff0180d9-fa98-44d3-8398-48c95759fdb4"). InnerVolumeSpecName "kube-api-access-2ntpg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:58:18 crc kubenswrapper[4769]: I1125 09:58:18.924075 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ff0180d9-fa98-44d3-8398-48c95759fdb4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ff0180d9-fa98-44d3-8398-48c95759fdb4" (UID: "ff0180d9-fa98-44d3-8398-48c95759fdb4"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:58:19 crc kubenswrapper[4769]: I1125 09:58:19.000063 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff0180d9-fa98-44d3-8398-48c95759fdb4-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:19 crc kubenswrapper[4769]: I1125 09:58:19.000129 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff0180d9-fa98-44d3-8398-48c95759fdb4-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:19 crc kubenswrapper[4769]: I1125 09:58:19.000151 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2ntpg\" (UniqueName: \"kubernetes.io/projected/ff0180d9-fa98-44d3-8398-48c95759fdb4-kube-api-access-2ntpg\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:19 crc kubenswrapper[4769]: I1125 09:58:19.350676 4769 generic.go:334] "Generic (PLEG): container finished" podID="ff0180d9-fa98-44d3-8398-48c95759fdb4" containerID="66961a973d01a33eddfc4dd010b9454f9cc16c505dd1d06b7e9483118ca30f77" exitCode=0 Nov 25 09:58:19 crc kubenswrapper[4769]: I1125 09:58:19.350729 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6bvms" event={"ID":"ff0180d9-fa98-44d3-8398-48c95759fdb4","Type":"ContainerDied","Data":"66961a973d01a33eddfc4dd010b9454f9cc16c505dd1d06b7e9483118ca30f77"} Nov 25 09:58:19 crc kubenswrapper[4769]: I1125 09:58:19.350761 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6bvms" event={"ID":"ff0180d9-fa98-44d3-8398-48c95759fdb4","Type":"ContainerDied","Data":"a9088d0215df0366547ee60daa6819669f9c6a85fb639a9c791656d4a40c2449"} Nov 25 09:58:19 crc kubenswrapper[4769]: I1125 09:58:19.350779 4769 scope.go:117] "RemoveContainer" containerID="66961a973d01a33eddfc4dd010b9454f9cc16c505dd1d06b7e9483118ca30f77" Nov 25 09:58:19 crc kubenswrapper[4769]: I1125 09:58:19.350849 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6bvms" Nov 25 09:58:19 crc kubenswrapper[4769]: I1125 09:58:19.368823 4769 scope.go:117] "RemoveContainer" containerID="2633580ac6b9e0e44109377f93632e448954585c7f31c9f055e254d7e8277631" Nov 25 09:58:19 crc kubenswrapper[4769]: I1125 09:58:19.395314 4769 scope.go:117] "RemoveContainer" containerID="2e7ffb716b606f2f2351a166707af532a93486c209d60af3b49a89564de84bd6" Nov 25 09:58:19 crc kubenswrapper[4769]: I1125 09:58:19.400537 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6bvms"] Nov 25 09:58:19 crc kubenswrapper[4769]: I1125 09:58:19.404565 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-6bvms"] Nov 25 09:58:19 crc kubenswrapper[4769]: I1125 09:58:19.432718 4769 scope.go:117] "RemoveContainer" containerID="66961a973d01a33eddfc4dd010b9454f9cc16c505dd1d06b7e9483118ca30f77" Nov 25 09:58:19 crc kubenswrapper[4769]: E1125 09:58:19.433437 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"66961a973d01a33eddfc4dd010b9454f9cc16c505dd1d06b7e9483118ca30f77\": container with ID starting with 66961a973d01a33eddfc4dd010b9454f9cc16c505dd1d06b7e9483118ca30f77 not found: ID does not exist" containerID="66961a973d01a33eddfc4dd010b9454f9cc16c505dd1d06b7e9483118ca30f77" Nov 25 09:58:19 crc kubenswrapper[4769]: I1125 09:58:19.433491 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"66961a973d01a33eddfc4dd010b9454f9cc16c505dd1d06b7e9483118ca30f77"} err="failed to get container status \"66961a973d01a33eddfc4dd010b9454f9cc16c505dd1d06b7e9483118ca30f77\": rpc error: code = NotFound desc = could not find container \"66961a973d01a33eddfc4dd010b9454f9cc16c505dd1d06b7e9483118ca30f77\": container with ID starting with 66961a973d01a33eddfc4dd010b9454f9cc16c505dd1d06b7e9483118ca30f77 not found: ID does not exist" Nov 25 09:58:19 crc kubenswrapper[4769]: I1125 09:58:19.433528 4769 scope.go:117] "RemoveContainer" containerID="2633580ac6b9e0e44109377f93632e448954585c7f31c9f055e254d7e8277631" Nov 25 09:58:19 crc kubenswrapper[4769]: E1125 09:58:19.434138 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2633580ac6b9e0e44109377f93632e448954585c7f31c9f055e254d7e8277631\": container with ID starting with 2633580ac6b9e0e44109377f93632e448954585c7f31c9f055e254d7e8277631 not found: ID does not exist" containerID="2633580ac6b9e0e44109377f93632e448954585c7f31c9f055e254d7e8277631" Nov 25 09:58:19 crc kubenswrapper[4769]: I1125 09:58:19.434285 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2633580ac6b9e0e44109377f93632e448954585c7f31c9f055e254d7e8277631"} err="failed to get container status \"2633580ac6b9e0e44109377f93632e448954585c7f31c9f055e254d7e8277631\": rpc error: code = NotFound desc = could not find container \"2633580ac6b9e0e44109377f93632e448954585c7f31c9f055e254d7e8277631\": container with ID starting with 2633580ac6b9e0e44109377f93632e448954585c7f31c9f055e254d7e8277631 not found: ID does not exist" Nov 25 09:58:19 crc kubenswrapper[4769]: I1125 09:58:19.434402 4769 scope.go:117] "RemoveContainer" containerID="2e7ffb716b606f2f2351a166707af532a93486c209d60af3b49a89564de84bd6" Nov 25 09:58:19 crc kubenswrapper[4769]: E1125 09:58:19.435226 4769 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"2e7ffb716b606f2f2351a166707af532a93486c209d60af3b49a89564de84bd6\": container with ID starting with 2e7ffb716b606f2f2351a166707af532a93486c209d60af3b49a89564de84bd6 not found: ID does not exist" containerID="2e7ffb716b606f2f2351a166707af532a93486c209d60af3b49a89564de84bd6" Nov 25 09:58:19 crc kubenswrapper[4769]: I1125 09:58:19.435261 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2e7ffb716b606f2f2351a166707af532a93486c209d60af3b49a89564de84bd6"} err="failed to get container status \"2e7ffb716b606f2f2351a166707af532a93486c209d60af3b49a89564de84bd6\": rpc error: code = NotFound desc = could not find container \"2e7ffb716b606f2f2351a166707af532a93486c209d60af3b49a89564de84bd6\": container with ID starting with 2e7ffb716b606f2f2351a166707af532a93486c209d60af3b49a89564de84bd6 not found: ID does not exist" Nov 25 09:58:20 crc kubenswrapper[4769]: I1125 09:58:20.248329 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ff0180d9-fa98-44d3-8398-48c95759fdb4" path="/var/lib/kubelet/pods/ff0180d9-fa98-44d3-8398-48c95759fdb4/volumes" Nov 25 09:58:21 crc kubenswrapper[4769]: I1125 09:58:21.887360 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-ingester-0" Nov 25 09:58:30 crc kubenswrapper[4769]: I1125 09:58:30.263186 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-4ch7j"] Nov 25 09:58:30 crc kubenswrapper[4769]: E1125 09:58:30.264504 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff0180d9-fa98-44d3-8398-48c95759fdb4" containerName="extract-content" Nov 25 09:58:30 crc kubenswrapper[4769]: I1125 09:58:30.264531 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff0180d9-fa98-44d3-8398-48c95759fdb4" containerName="extract-content" Nov 25 09:58:30 crc kubenswrapper[4769]: E1125 09:58:30.264582 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff0180d9-fa98-44d3-8398-48c95759fdb4" containerName="extract-utilities" Nov 25 09:58:30 crc kubenswrapper[4769]: I1125 09:58:30.264596 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff0180d9-fa98-44d3-8398-48c95759fdb4" containerName="extract-utilities" Nov 25 09:58:30 crc kubenswrapper[4769]: E1125 09:58:30.264623 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff0180d9-fa98-44d3-8398-48c95759fdb4" containerName="registry-server" Nov 25 09:58:30 crc kubenswrapper[4769]: I1125 09:58:30.264638 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff0180d9-fa98-44d3-8398-48c95759fdb4" containerName="registry-server" Nov 25 09:58:30 crc kubenswrapper[4769]: I1125 09:58:30.264920 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff0180d9-fa98-44d3-8398-48c95759fdb4" containerName="registry-server" Nov 25 09:58:30 crc kubenswrapper[4769]: I1125 09:58:30.268564 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-4ch7j" Nov 25 09:58:30 crc kubenswrapper[4769]: I1125 09:58:30.277549 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4ch7j"] Nov 25 09:58:30 crc kubenswrapper[4769]: I1125 09:58:30.334871 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd33c731-bfb7-4848-a754-5d625ecf5bba-utilities\") pod \"redhat-operators-4ch7j\" (UID: \"fd33c731-bfb7-4848-a754-5d625ecf5bba\") " pod="openshift-marketplace/redhat-operators-4ch7j" Nov 25 09:58:30 crc kubenswrapper[4769]: I1125 09:58:30.334938 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8hx8v\" (UniqueName: \"kubernetes.io/projected/fd33c731-bfb7-4848-a754-5d625ecf5bba-kube-api-access-8hx8v\") pod \"redhat-operators-4ch7j\" (UID: \"fd33c731-bfb7-4848-a754-5d625ecf5bba\") " pod="openshift-marketplace/redhat-operators-4ch7j" Nov 25 09:58:30 crc kubenswrapper[4769]: I1125 09:58:30.335001 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd33c731-bfb7-4848-a754-5d625ecf5bba-catalog-content\") pod \"redhat-operators-4ch7j\" (UID: \"fd33c731-bfb7-4848-a754-5d625ecf5bba\") " pod="openshift-marketplace/redhat-operators-4ch7j" Nov 25 09:58:30 crc kubenswrapper[4769]: I1125 09:58:30.437320 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd33c731-bfb7-4848-a754-5d625ecf5bba-utilities\") pod \"redhat-operators-4ch7j\" (UID: \"fd33c731-bfb7-4848-a754-5d625ecf5bba\") " pod="openshift-marketplace/redhat-operators-4ch7j" Nov 25 09:58:30 crc kubenswrapper[4769]: I1125 09:58:30.438130 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd33c731-bfb7-4848-a754-5d625ecf5bba-utilities\") pod \"redhat-operators-4ch7j\" (UID: \"fd33c731-bfb7-4848-a754-5d625ecf5bba\") " pod="openshift-marketplace/redhat-operators-4ch7j" Nov 25 09:58:30 crc kubenswrapper[4769]: I1125 09:58:30.438332 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8hx8v\" (UniqueName: \"kubernetes.io/projected/fd33c731-bfb7-4848-a754-5d625ecf5bba-kube-api-access-8hx8v\") pod \"redhat-operators-4ch7j\" (UID: \"fd33c731-bfb7-4848-a754-5d625ecf5bba\") " pod="openshift-marketplace/redhat-operators-4ch7j" Nov 25 09:58:30 crc kubenswrapper[4769]: I1125 09:58:30.438648 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd33c731-bfb7-4848-a754-5d625ecf5bba-catalog-content\") pod \"redhat-operators-4ch7j\" (UID: \"fd33c731-bfb7-4848-a754-5d625ecf5bba\") " pod="openshift-marketplace/redhat-operators-4ch7j" Nov 25 09:58:30 crc kubenswrapper[4769]: I1125 09:58:30.439255 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd33c731-bfb7-4848-a754-5d625ecf5bba-catalog-content\") pod \"redhat-operators-4ch7j\" (UID: \"fd33c731-bfb7-4848-a754-5d625ecf5bba\") " pod="openshift-marketplace/redhat-operators-4ch7j" Nov 25 09:58:30 crc kubenswrapper[4769]: I1125 09:58:30.467050 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-8hx8v\" (UniqueName: \"kubernetes.io/projected/fd33c731-bfb7-4848-a754-5d625ecf5bba-kube-api-access-8hx8v\") pod \"redhat-operators-4ch7j\" (UID: \"fd33c731-bfb7-4848-a754-5d625ecf5bba\") " pod="openshift-marketplace/redhat-operators-4ch7j" Nov 25 09:58:30 crc kubenswrapper[4769]: I1125 09:58:30.672864 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4ch7j" Nov 25 09:58:31 crc kubenswrapper[4769]: I1125 09:58:31.129571 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4ch7j"] Nov 25 09:58:31 crc kubenswrapper[4769]: I1125 09:58:31.466114 4769 generic.go:334] "Generic (PLEG): container finished" podID="fd33c731-bfb7-4848-a754-5d625ecf5bba" containerID="860fc263361e31a977e88aa0a3b458d89006aae381c7b2f9b4a0f7ea8acee153" exitCode=0 Nov 25 09:58:31 crc kubenswrapper[4769]: I1125 09:58:31.466203 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4ch7j" event={"ID":"fd33c731-bfb7-4848-a754-5d625ecf5bba","Type":"ContainerDied","Data":"860fc263361e31a977e88aa0a3b458d89006aae381c7b2f9b4a0f7ea8acee153"} Nov 25 09:58:31 crc kubenswrapper[4769]: I1125 09:58:31.466314 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4ch7j" event={"ID":"fd33c731-bfb7-4848-a754-5d625ecf5bba","Type":"ContainerStarted","Data":"bae13991eecae2672604d3a525d93a264b05608368c936b23d14165d29b04d29"} Nov 25 09:58:36 crc kubenswrapper[4769]: I1125 09:58:36.520260 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4ch7j" event={"ID":"fd33c731-bfb7-4848-a754-5d625ecf5bba","Type":"ContainerStarted","Data":"9759364875a645c683d41a69d90c9a2b07abf3c89eb37810fc46ead6c870843f"} Nov 25 09:58:37 crc kubenswrapper[4769]: I1125 09:58:37.534414 4769 generic.go:334] "Generic (PLEG): container finished" podID="fd33c731-bfb7-4848-a754-5d625ecf5bba" containerID="9759364875a645c683d41a69d90c9a2b07abf3c89eb37810fc46ead6c870843f" exitCode=0 Nov 25 09:58:37 crc kubenswrapper[4769]: I1125 09:58:37.534493 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4ch7j" event={"ID":"fd33c731-bfb7-4848-a754-5d625ecf5bba","Type":"ContainerDied","Data":"9759364875a645c683d41a69d90c9a2b07abf3c89eb37810fc46ead6c870843f"} Nov 25 09:58:38 crc kubenswrapper[4769]: I1125 09:58:38.555642 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4ch7j" event={"ID":"fd33c731-bfb7-4848-a754-5d625ecf5bba","Type":"ContainerStarted","Data":"402054f28cb636aaeff46c798035c70718bfb4e124d8a41627b532e0b0e3c3dd"} Nov 25 09:58:38 crc kubenswrapper[4769]: I1125 09:58:38.591449 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-4ch7j" podStartSLOduration=2.143545796 podStartE2EDuration="8.591420955s" podCreationTimestamp="2025-11-25 09:58:30 +0000 UTC" firstStartedPulling="2025-11-25 09:58:31.469670218 +0000 UTC m=+860.054642541" lastFinishedPulling="2025-11-25 09:58:37.917545377 +0000 UTC m=+866.502517700" observedRunningTime="2025-11-25 09:58:38.583642877 +0000 UTC m=+867.168615200" watchObservedRunningTime="2025-11-25 09:58:38.591420955 +0000 UTC m=+867.176393268" Nov 25 09:58:40 crc kubenswrapper[4769]: I1125 09:58:40.674090 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-4ch7j" Nov 25 
09:58:40 crc kubenswrapper[4769]: I1125 09:58:40.674214 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-4ch7j" Nov 25 09:58:40 crc kubenswrapper[4769]: I1125 09:58:40.829564 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/collector-b6p9s"] Nov 25 09:58:40 crc kubenswrapper[4769]: I1125 09:58:40.830671 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-b6p9s" Nov 25 09:58:40 crc kubenswrapper[4769]: I1125 09:58:40.841165 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-syslog-receiver" Nov 25 09:58:40 crc kubenswrapper[4769]: I1125 09:58:40.841286 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-metrics" Nov 25 09:58:40 crc kubenswrapper[4769]: I1125 09:58:40.841360 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-config" Nov 25 09:58:40 crc kubenswrapper[4769]: I1125 09:58:40.842043 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-dockercfg-wtzkt" Nov 25 09:58:40 crc kubenswrapper[4769]: I1125 09:58:40.843381 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-token" Nov 25 09:58:40 crc kubenswrapper[4769]: I1125 09:58:40.857445 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-trustbundle" Nov 25 09:58:40 crc kubenswrapper[4769]: I1125 09:58:40.867078 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/collector-b6p9s"] Nov 25 09:58:40 crc kubenswrapper[4769]: I1125 09:58:40.948439 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-config\") pod \"collector-b6p9s\" (UID: \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\") " pod="openshift-logging/collector-b6p9s" Nov 25 09:58:40 crc kubenswrapper[4769]: I1125 09:58:40.948505 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-collector-syslog-receiver\") pod \"collector-b6p9s\" (UID: \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\") " pod="openshift-logging/collector-b6p9s" Nov 25 09:58:40 crc kubenswrapper[4769]: I1125 09:58:40.948602 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-config-openshift-service-cacrt\") pod \"collector-b6p9s\" (UID: \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\") " pod="openshift-logging/collector-b6p9s" Nov 25 09:58:40 crc kubenswrapper[4769]: I1125 09:58:40.948648 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2dvtg\" (UniqueName: \"kubernetes.io/projected/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-kube-api-access-2dvtg\") pod \"collector-b6p9s\" (UID: \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\") " pod="openshift-logging/collector-b6p9s" Nov 25 09:58:40 crc kubenswrapper[4769]: I1125 09:58:40.948704 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sa-token\" (UniqueName: 
\"kubernetes.io/projected/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-sa-token\") pod \"collector-b6p9s\" (UID: \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\") " pod="openshift-logging/collector-b6p9s" Nov 25 09:58:40 crc kubenswrapper[4769]: I1125 09:58:40.948730 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-trusted-ca\") pod \"collector-b6p9s\" (UID: \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\") " pod="openshift-logging/collector-b6p9s" Nov 25 09:58:40 crc kubenswrapper[4769]: I1125 09:58:40.948794 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-metrics\") pod \"collector-b6p9s\" (UID: \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\") " pod="openshift-logging/collector-b6p9s" Nov 25 09:58:40 crc kubenswrapper[4769]: I1125 09:58:40.948984 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-collector-token\") pod \"collector-b6p9s\" (UID: \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\") " pod="openshift-logging/collector-b6p9s" Nov 25 09:58:40 crc kubenswrapper[4769]: I1125 09:58:40.949104 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-datadir\") pod \"collector-b6p9s\" (UID: \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\") " pod="openshift-logging/collector-b6p9s" Nov 25 09:58:40 crc kubenswrapper[4769]: I1125 09:58:40.949177 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-tmp\") pod \"collector-b6p9s\" (UID: \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\") " pod="openshift-logging/collector-b6p9s" Nov 25 09:58:40 crc kubenswrapper[4769]: I1125 09:58:40.949257 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-entrypoint\") pod \"collector-b6p9s\" (UID: \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\") " pod="openshift-logging/collector-b6p9s" Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.009196 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-logging/collector-b6p9s"] Nov 25 09:58:41 crc kubenswrapper[4769]: E1125 09:58:41.010302 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[collector-syslog-receiver collector-token config config-openshift-service-cacrt datadir entrypoint kube-api-access-2dvtg metrics sa-token tmp trusted-ca], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openshift-logging/collector-b6p9s" podUID="2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9" Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.050882 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-entrypoint\") pod \"collector-b6p9s\" (UID: \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\") " pod="openshift-logging/collector-b6p9s" Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.051044 4769 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-config\") pod \"collector-b6p9s\" (UID: \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\") " pod="openshift-logging/collector-b6p9s" Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.051083 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-collector-syslog-receiver\") pod \"collector-b6p9s\" (UID: \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\") " pod="openshift-logging/collector-b6p9s" Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.051137 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-config-openshift-service-cacrt\") pod \"collector-b6p9s\" (UID: \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\") " pod="openshift-logging/collector-b6p9s" Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.051180 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2dvtg\" (UniqueName: \"kubernetes.io/projected/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-kube-api-access-2dvtg\") pod \"collector-b6p9s\" (UID: \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\") " pod="openshift-logging/collector-b6p9s" Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.051229 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-sa-token\") pod \"collector-b6p9s\" (UID: \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\") " pod="openshift-logging/collector-b6p9s" Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.051258 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-trusted-ca\") pod \"collector-b6p9s\" (UID: \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\") " pod="openshift-logging/collector-b6p9s" Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.051279 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-metrics\") pod \"collector-b6p9s\" (UID: \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\") " pod="openshift-logging/collector-b6p9s" Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.051301 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-collector-token\") pod \"collector-b6p9s\" (UID: \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\") " pod="openshift-logging/collector-b6p9s" Nov 25 09:58:41 crc kubenswrapper[4769]: E1125 09:58:41.051317 4769 secret.go:188] Couldn't get secret openshift-logging/collector-syslog-receiver: secret "collector-syslog-receiver" not found Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.051333 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-datadir\") pod \"collector-b6p9s\" (UID: \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\") " pod="openshift-logging/collector-b6p9s" Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.051371 4769 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-tmp\") pod \"collector-b6p9s\" (UID: \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\") " pod="openshift-logging/collector-b6p9s" Nov 25 09:58:41 crc kubenswrapper[4769]: E1125 09:58:41.051409 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-collector-syslog-receiver podName:2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9 nodeName:}" failed. No retries permitted until 2025-11-25 09:58:41.55138081 +0000 UTC m=+870.136353123 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "collector-syslog-receiver" (UniqueName: "kubernetes.io/secret/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-collector-syslog-receiver") pod "collector-b6p9s" (UID: "2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9") : secret "collector-syslog-receiver" not found Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.051485 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-datadir\") pod \"collector-b6p9s\" (UID: \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\") " pod="openshift-logging/collector-b6p9s" Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.052259 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-entrypoint\") pod \"collector-b6p9s\" (UID: \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\") " pod="openshift-logging/collector-b6p9s" Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.053029 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-config\") pod \"collector-b6p9s\" (UID: \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\") " pod="openshift-logging/collector-b6p9s" Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.053034 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-config-openshift-service-cacrt\") pod \"collector-b6p9s\" (UID: \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\") " pod="openshift-logging/collector-b6p9s" Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.055223 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-trusted-ca\") pod \"collector-b6p9s\" (UID: \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\") " pod="openshift-logging/collector-b6p9s" Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.059298 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-tmp\") pod \"collector-b6p9s\" (UID: \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\") " pod="openshift-logging/collector-b6p9s" Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.059602 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-metrics\") pod \"collector-b6p9s\" (UID: \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\") " pod="openshift-logging/collector-b6p9s" Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.071818 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"collector-token\" (UniqueName: \"kubernetes.io/secret/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-collector-token\") pod \"collector-b6p9s\" (UID: \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\") " pod="openshift-logging/collector-b6p9s" Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.071905 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-sa-token\") pod \"collector-b6p9s\" (UID: \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\") " pod="openshift-logging/collector-b6p9s" Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.072717 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2dvtg\" (UniqueName: \"kubernetes.io/projected/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-kube-api-access-2dvtg\") pod \"collector-b6p9s\" (UID: \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\") " pod="openshift-logging/collector-b6p9s" Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.560058 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-collector-syslog-receiver\") pod \"collector-b6p9s\" (UID: \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\") " pod="openshift-logging/collector-b6p9s" Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.564944 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-collector-syslog-receiver\") pod \"collector-b6p9s\" (UID: \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\") " pod="openshift-logging/collector-b6p9s" Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.580403 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-b6p9s" Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.609310 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/collector-b6p9s" Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.661616 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-config\") pod \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\" (UID: \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\") " Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.661659 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-config-openshift-service-cacrt\") pod \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\" (UID: \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\") " Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.661677 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-trusted-ca\") pod \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\" (UID: \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\") " Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.661709 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-entrypoint\") pod \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\" (UID: \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\") " Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.661773 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-sa-token\") pod \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\" (UID: \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\") " Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.661847 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-collector-token\") pod \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\" (UID: \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\") " Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.661896 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-tmp\") pod \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\" (UID: \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\") " Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.661926 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-datadir\") pod \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\" (UID: \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\") " Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.661953 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2dvtg\" (UniqueName: \"kubernetes.io/projected/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-kube-api-access-2dvtg\") pod \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\" (UID: \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\") " Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.661989 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-collector-syslog-receiver\") pod \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\" (UID: 
\"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\") " Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.662033 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-metrics\") pod \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\" (UID: \"2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9\") " Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.662759 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-datadir" (OuterVolumeSpecName: "datadir") pod "2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9" (UID: "2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9"). InnerVolumeSpecName "datadir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.663374 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-config-openshift-service-cacrt" (OuterVolumeSpecName: "config-openshift-service-cacrt") pod "2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9" (UID: "2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9"). InnerVolumeSpecName "config-openshift-service-cacrt". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.664390 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9" (UID: "2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.664489 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-config" (OuterVolumeSpecName: "config") pod "2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9" (UID: "2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.664860 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-entrypoint" (OuterVolumeSpecName: "entrypoint") pod "2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9" (UID: "2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9"). InnerVolumeSpecName "entrypoint". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.666854 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-tmp" (OuterVolumeSpecName: "tmp") pod "2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9" (UID: "2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9"). InnerVolumeSpecName "tmp". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.667759 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-collector-syslog-receiver" (OuterVolumeSpecName: "collector-syslog-receiver") pod "2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9" (UID: "2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9"). InnerVolumeSpecName "collector-syslog-receiver". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.668877 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-metrics" (OuterVolumeSpecName: "metrics") pod "2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9" (UID: "2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9"). InnerVolumeSpecName "metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.670173 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-kube-api-access-2dvtg" (OuterVolumeSpecName: "kube-api-access-2dvtg") pod "2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9" (UID: "2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9"). InnerVolumeSpecName "kube-api-access-2dvtg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.670355 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-collector-token" (OuterVolumeSpecName: "collector-token") pod "2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9" (UID: "2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9"). InnerVolumeSpecName "collector-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.671347 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-sa-token" (OuterVolumeSpecName: "sa-token") pod "2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9" (UID: "2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9"). InnerVolumeSpecName "sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.732888 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-4ch7j" podUID="fd33c731-bfb7-4848-a754-5d625ecf5bba" containerName="registry-server" probeResult="failure" output=< Nov 25 09:58:41 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 09:58:41 crc kubenswrapper[4769]: > Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.764414 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.764469 4769 reconciler_common.go:293] "Volume detached for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-config-openshift-service-cacrt\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.764482 4769 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.764496 4769 reconciler_common.go:293] "Volume detached for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-entrypoint\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.764505 4769 reconciler_common.go:293] "Volume detached for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-sa-token\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:41 
crc kubenswrapper[4769]: I1125 09:58:41.764514 4769 reconciler_common.go:293] "Volume detached for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-collector-token\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.764523 4769 reconciler_common.go:293] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-tmp\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.764536 4769 reconciler_common.go:293] "Volume detached for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-datadir\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.764548 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2dvtg\" (UniqueName: \"kubernetes.io/projected/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-kube-api-access-2dvtg\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.764560 4769 reconciler_common.go:293] "Volume detached for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-collector-syslog-receiver\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:41 crc kubenswrapper[4769]: I1125 09:58:41.764568 4769 reconciler_common.go:293] "Volume detached for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9-metrics\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.586989 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-b6p9s" Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.623729 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-logging/collector-b6p9s"] Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.638836 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-logging/collector-b6p9s"] Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.652562 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/collector-f5b9l"] Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.653926 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/collector-f5b9l" Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.657263 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-metrics" Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.657637 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-token" Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.657823 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-dockercfg-wtzkt" Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.658034 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-config" Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.658465 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-syslog-receiver" Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.668857 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-trustbundle" Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.676842 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/collector-f5b9l"] Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.681525 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c107b3e5-59bc-48d8-927b-565e46733679-trusted-ca\") pod \"collector-f5b9l\" (UID: \"c107b3e5-59bc-48d8-927b-565e46733679\") " pod="openshift-logging/collector-f5b9l" Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.681660 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/c107b3e5-59bc-48d8-927b-565e46733679-metrics\") pod \"collector-f5b9l\" (UID: \"c107b3e5-59bc-48d8-927b-565e46733679\") " pod="openshift-logging/collector-f5b9l" Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.681762 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/c107b3e5-59bc-48d8-927b-565e46733679-entrypoint\") pod \"collector-f5b9l\" (UID: \"c107b3e5-59bc-48d8-927b-565e46733679\") " pod="openshift-logging/collector-f5b9l" Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.681813 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/c107b3e5-59bc-48d8-927b-565e46733679-sa-token\") pod \"collector-f5b9l\" (UID: \"c107b3e5-59bc-48d8-927b-565e46733679\") " pod="openshift-logging/collector-f5b9l" Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.681853 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/c107b3e5-59bc-48d8-927b-565e46733679-datadir\") pod \"collector-f5b9l\" (UID: \"c107b3e5-59bc-48d8-927b-565e46733679\") " pod="openshift-logging/collector-f5b9l" Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.681995 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/c107b3e5-59bc-48d8-927b-565e46733679-collector-syslog-receiver\") pod \"collector-f5b9l\" (UID: \"c107b3e5-59bc-48d8-927b-565e46733679\") " 
pod="openshift-logging/collector-f5b9l" Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.682041 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/c107b3e5-59bc-48d8-927b-565e46733679-tmp\") pod \"collector-f5b9l\" (UID: \"c107b3e5-59bc-48d8-927b-565e46733679\") " pod="openshift-logging/collector-f5b9l" Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.682068 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/c107b3e5-59bc-48d8-927b-565e46733679-collector-token\") pod \"collector-f5b9l\" (UID: \"c107b3e5-59bc-48d8-927b-565e46733679\") " pod="openshift-logging/collector-f5b9l" Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.682094 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c107b3e5-59bc-48d8-927b-565e46733679-config\") pod \"collector-f5b9l\" (UID: \"c107b3e5-59bc-48d8-927b-565e46733679\") " pod="openshift-logging/collector-f5b9l" Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.682292 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/c107b3e5-59bc-48d8-927b-565e46733679-config-openshift-service-cacrt\") pod \"collector-f5b9l\" (UID: \"c107b3e5-59bc-48d8-927b-565e46733679\") " pod="openshift-logging/collector-f5b9l" Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.682353 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gtx79\" (UniqueName: \"kubernetes.io/projected/c107b3e5-59bc-48d8-927b-565e46733679-kube-api-access-gtx79\") pod \"collector-f5b9l\" (UID: \"c107b3e5-59bc-48d8-927b-565e46733679\") " pod="openshift-logging/collector-f5b9l" Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.784440 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c107b3e5-59bc-48d8-927b-565e46733679-trusted-ca\") pod \"collector-f5b9l\" (UID: \"c107b3e5-59bc-48d8-927b-565e46733679\") " pod="openshift-logging/collector-f5b9l" Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.784505 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/c107b3e5-59bc-48d8-927b-565e46733679-metrics\") pod \"collector-f5b9l\" (UID: \"c107b3e5-59bc-48d8-927b-565e46733679\") " pod="openshift-logging/collector-f5b9l" Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.784535 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/c107b3e5-59bc-48d8-927b-565e46733679-entrypoint\") pod \"collector-f5b9l\" (UID: \"c107b3e5-59bc-48d8-927b-565e46733679\") " pod="openshift-logging/collector-f5b9l" Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.784568 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/c107b3e5-59bc-48d8-927b-565e46733679-sa-token\") pod \"collector-f5b9l\" (UID: \"c107b3e5-59bc-48d8-927b-565e46733679\") " pod="openshift-logging/collector-f5b9l" Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.784607 4769 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/c107b3e5-59bc-48d8-927b-565e46733679-datadir\") pod \"collector-f5b9l\" (UID: \"c107b3e5-59bc-48d8-927b-565e46733679\") " pod="openshift-logging/collector-f5b9l" Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.784634 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/c107b3e5-59bc-48d8-927b-565e46733679-collector-syslog-receiver\") pod \"collector-f5b9l\" (UID: \"c107b3e5-59bc-48d8-927b-565e46733679\") " pod="openshift-logging/collector-f5b9l" Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.784657 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/c107b3e5-59bc-48d8-927b-565e46733679-tmp\") pod \"collector-f5b9l\" (UID: \"c107b3e5-59bc-48d8-927b-565e46733679\") " pod="openshift-logging/collector-f5b9l" Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.784679 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/c107b3e5-59bc-48d8-927b-565e46733679-collector-token\") pod \"collector-f5b9l\" (UID: \"c107b3e5-59bc-48d8-927b-565e46733679\") " pod="openshift-logging/collector-f5b9l" Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.784696 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c107b3e5-59bc-48d8-927b-565e46733679-config\") pod \"collector-f5b9l\" (UID: \"c107b3e5-59bc-48d8-927b-565e46733679\") " pod="openshift-logging/collector-f5b9l" Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.784741 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/c107b3e5-59bc-48d8-927b-565e46733679-config-openshift-service-cacrt\") pod \"collector-f5b9l\" (UID: \"c107b3e5-59bc-48d8-927b-565e46733679\") " pod="openshift-logging/collector-f5b9l" Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.784762 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gtx79\" (UniqueName: \"kubernetes.io/projected/c107b3e5-59bc-48d8-927b-565e46733679-kube-api-access-gtx79\") pod \"collector-f5b9l\" (UID: \"c107b3e5-59bc-48d8-927b-565e46733679\") " pod="openshift-logging/collector-f5b9l" Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.786061 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/c107b3e5-59bc-48d8-927b-565e46733679-datadir\") pod \"collector-f5b9l\" (UID: \"c107b3e5-59bc-48d8-927b-565e46733679\") " pod="openshift-logging/collector-f5b9l" Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.786761 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c107b3e5-59bc-48d8-927b-565e46733679-trusted-ca\") pod \"collector-f5b9l\" (UID: \"c107b3e5-59bc-48d8-927b-565e46733679\") " pod="openshift-logging/collector-f5b9l" Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.787202 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c107b3e5-59bc-48d8-927b-565e46733679-config\") pod \"collector-f5b9l\" (UID: \"c107b3e5-59bc-48d8-927b-565e46733679\") " 
pod="openshift-logging/collector-f5b9l" Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.787581 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/c107b3e5-59bc-48d8-927b-565e46733679-config-openshift-service-cacrt\") pod \"collector-f5b9l\" (UID: \"c107b3e5-59bc-48d8-927b-565e46733679\") " pod="openshift-logging/collector-f5b9l" Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.787988 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/c107b3e5-59bc-48d8-927b-565e46733679-entrypoint\") pod \"collector-f5b9l\" (UID: \"c107b3e5-59bc-48d8-927b-565e46733679\") " pod="openshift-logging/collector-f5b9l" Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.792934 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/c107b3e5-59bc-48d8-927b-565e46733679-tmp\") pod \"collector-f5b9l\" (UID: \"c107b3e5-59bc-48d8-927b-565e46733679\") " pod="openshift-logging/collector-f5b9l" Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.794179 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/c107b3e5-59bc-48d8-927b-565e46733679-collector-token\") pod \"collector-f5b9l\" (UID: \"c107b3e5-59bc-48d8-927b-565e46733679\") " pod="openshift-logging/collector-f5b9l" Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.801777 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/c107b3e5-59bc-48d8-927b-565e46733679-collector-syslog-receiver\") pod \"collector-f5b9l\" (UID: \"c107b3e5-59bc-48d8-927b-565e46733679\") " pod="openshift-logging/collector-f5b9l" Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.808514 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/c107b3e5-59bc-48d8-927b-565e46733679-metrics\") pod \"collector-f5b9l\" (UID: \"c107b3e5-59bc-48d8-927b-565e46733679\") " pod="openshift-logging/collector-f5b9l" Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.817139 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/c107b3e5-59bc-48d8-927b-565e46733679-sa-token\") pod \"collector-f5b9l\" (UID: \"c107b3e5-59bc-48d8-927b-565e46733679\") " pod="openshift-logging/collector-f5b9l" Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.821801 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gtx79\" (UniqueName: \"kubernetes.io/projected/c107b3e5-59bc-48d8-927b-565e46733679-kube-api-access-gtx79\") pod \"collector-f5b9l\" (UID: \"c107b3e5-59bc-48d8-927b-565e46733679\") " pod="openshift-logging/collector-f5b9l" Nov 25 09:58:42 crc kubenswrapper[4769]: I1125 09:58:42.977630 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/collector-f5b9l" Nov 25 09:58:43 crc kubenswrapper[4769]: I1125 09:58:43.447508 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/collector-f5b9l"] Nov 25 09:58:43 crc kubenswrapper[4769]: I1125 09:58:43.595947 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/collector-f5b9l" event={"ID":"c107b3e5-59bc-48d8-927b-565e46733679","Type":"ContainerStarted","Data":"9b7098e22daffc9fd1641604b5a7509f0c1a65dafe6a0cb1dec756840b975a12"} Nov 25 09:58:44 crc kubenswrapper[4769]: I1125 09:58:44.246140 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9" path="/var/lib/kubelet/pods/2e9147de-d9bf-4cb4-9083-a8ec2d4cb2d9/volumes" Nov 25 09:58:50 crc kubenswrapper[4769]: I1125 09:58:50.654241 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/collector-f5b9l" event={"ID":"c107b3e5-59bc-48d8-927b-565e46733679","Type":"ContainerStarted","Data":"78c9252ecbc771d480efeaab9c4998676ad06ef60f30ad14cc0b663d06955660"} Nov 25 09:58:50 crc kubenswrapper[4769]: I1125 09:58:50.681866 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/collector-f5b9l" podStartSLOduration=1.9411752839999998 podStartE2EDuration="8.681841819s" podCreationTimestamp="2025-11-25 09:58:42 +0000 UTC" firstStartedPulling="2025-11-25 09:58:43.457117041 +0000 UTC m=+872.042089354" lastFinishedPulling="2025-11-25 09:58:50.197783576 +0000 UTC m=+878.782755889" observedRunningTime="2025-11-25 09:58:50.675911584 +0000 UTC m=+879.260883897" watchObservedRunningTime="2025-11-25 09:58:50.681841819 +0000 UTC m=+879.266814132" Nov 25 09:58:50 crc kubenswrapper[4769]: I1125 09:58:50.734799 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-4ch7j" Nov 25 09:58:50 crc kubenswrapper[4769]: I1125 09:58:50.790993 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-4ch7j" Nov 25 09:58:50 crc kubenswrapper[4769]: I1125 09:58:50.972303 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4ch7j"] Nov 25 09:58:52 crc kubenswrapper[4769]: I1125 09:58:52.668448 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-4ch7j" podUID="fd33c731-bfb7-4848-a754-5d625ecf5bba" containerName="registry-server" containerID="cri-o://402054f28cb636aaeff46c798035c70718bfb4e124d8a41627b532e0b0e3c3dd" gracePeriod=2 Nov 25 09:58:53 crc kubenswrapper[4769]: I1125 09:58:53.154356 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-4ch7j" Nov 25 09:58:53 crc kubenswrapper[4769]: I1125 09:58:53.185515 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd33c731-bfb7-4848-a754-5d625ecf5bba-utilities\") pod \"fd33c731-bfb7-4848-a754-5d625ecf5bba\" (UID: \"fd33c731-bfb7-4848-a754-5d625ecf5bba\") " Nov 25 09:58:53 crc kubenswrapper[4769]: I1125 09:58:53.185769 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd33c731-bfb7-4848-a754-5d625ecf5bba-catalog-content\") pod \"fd33c731-bfb7-4848-a754-5d625ecf5bba\" (UID: \"fd33c731-bfb7-4848-a754-5d625ecf5bba\") " Nov 25 09:58:53 crc kubenswrapper[4769]: I1125 09:58:53.185936 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8hx8v\" (UniqueName: \"kubernetes.io/projected/fd33c731-bfb7-4848-a754-5d625ecf5bba-kube-api-access-8hx8v\") pod \"fd33c731-bfb7-4848-a754-5d625ecf5bba\" (UID: \"fd33c731-bfb7-4848-a754-5d625ecf5bba\") " Nov 25 09:58:53 crc kubenswrapper[4769]: I1125 09:58:53.186812 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fd33c731-bfb7-4848-a754-5d625ecf5bba-utilities" (OuterVolumeSpecName: "utilities") pod "fd33c731-bfb7-4848-a754-5d625ecf5bba" (UID: "fd33c731-bfb7-4848-a754-5d625ecf5bba"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:58:53 crc kubenswrapper[4769]: I1125 09:58:53.194389 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd33c731-bfb7-4848-a754-5d625ecf5bba-kube-api-access-8hx8v" (OuterVolumeSpecName: "kube-api-access-8hx8v") pod "fd33c731-bfb7-4848-a754-5d625ecf5bba" (UID: "fd33c731-bfb7-4848-a754-5d625ecf5bba"). InnerVolumeSpecName "kube-api-access-8hx8v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:58:53 crc kubenswrapper[4769]: I1125 09:58:53.270561 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fd33c731-bfb7-4848-a754-5d625ecf5bba-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fd33c731-bfb7-4848-a754-5d625ecf5bba" (UID: "fd33c731-bfb7-4848-a754-5d625ecf5bba"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:58:53 crc kubenswrapper[4769]: I1125 09:58:53.288568 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8hx8v\" (UniqueName: \"kubernetes.io/projected/fd33c731-bfb7-4848-a754-5d625ecf5bba-kube-api-access-8hx8v\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:53 crc kubenswrapper[4769]: I1125 09:58:53.288620 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd33c731-bfb7-4848-a754-5d625ecf5bba-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:53 crc kubenswrapper[4769]: I1125 09:58:53.288635 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd33c731-bfb7-4848-a754-5d625ecf5bba-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:58:53 crc kubenswrapper[4769]: I1125 09:58:53.677829 4769 generic.go:334] "Generic (PLEG): container finished" podID="fd33c731-bfb7-4848-a754-5d625ecf5bba" containerID="402054f28cb636aaeff46c798035c70718bfb4e124d8a41627b532e0b0e3c3dd" exitCode=0 Nov 25 09:58:53 crc kubenswrapper[4769]: I1125 09:58:53.677897 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4ch7j" event={"ID":"fd33c731-bfb7-4848-a754-5d625ecf5bba","Type":"ContainerDied","Data":"402054f28cb636aaeff46c798035c70718bfb4e124d8a41627b532e0b0e3c3dd"} Nov 25 09:58:53 crc kubenswrapper[4769]: I1125 09:58:53.677950 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4ch7j" event={"ID":"fd33c731-bfb7-4848-a754-5d625ecf5bba","Type":"ContainerDied","Data":"bae13991eecae2672604d3a525d93a264b05608368c936b23d14165d29b04d29"} Nov 25 09:58:53 crc kubenswrapper[4769]: I1125 09:58:53.677983 4769 scope.go:117] "RemoveContainer" containerID="402054f28cb636aaeff46c798035c70718bfb4e124d8a41627b532e0b0e3c3dd" Nov 25 09:58:53 crc kubenswrapper[4769]: I1125 09:58:53.677945 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-4ch7j" Nov 25 09:58:53 crc kubenswrapper[4769]: I1125 09:58:53.705441 4769 scope.go:117] "RemoveContainer" containerID="9759364875a645c683d41a69d90c9a2b07abf3c89eb37810fc46ead6c870843f" Nov 25 09:58:53 crc kubenswrapper[4769]: I1125 09:58:53.717914 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4ch7j"] Nov 25 09:58:53 crc kubenswrapper[4769]: I1125 09:58:53.730040 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-4ch7j"] Nov 25 09:58:53 crc kubenswrapper[4769]: I1125 09:58:53.732578 4769 scope.go:117] "RemoveContainer" containerID="860fc263361e31a977e88aa0a3b458d89006aae381c7b2f9b4a0f7ea8acee153" Nov 25 09:58:53 crc kubenswrapper[4769]: I1125 09:58:53.758227 4769 scope.go:117] "RemoveContainer" containerID="402054f28cb636aaeff46c798035c70718bfb4e124d8a41627b532e0b0e3c3dd" Nov 25 09:58:53 crc kubenswrapper[4769]: E1125 09:58:53.759106 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"402054f28cb636aaeff46c798035c70718bfb4e124d8a41627b532e0b0e3c3dd\": container with ID starting with 402054f28cb636aaeff46c798035c70718bfb4e124d8a41627b532e0b0e3c3dd not found: ID does not exist" containerID="402054f28cb636aaeff46c798035c70718bfb4e124d8a41627b532e0b0e3c3dd" Nov 25 09:58:53 crc kubenswrapper[4769]: I1125 09:58:53.759188 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"402054f28cb636aaeff46c798035c70718bfb4e124d8a41627b532e0b0e3c3dd"} err="failed to get container status \"402054f28cb636aaeff46c798035c70718bfb4e124d8a41627b532e0b0e3c3dd\": rpc error: code = NotFound desc = could not find container \"402054f28cb636aaeff46c798035c70718bfb4e124d8a41627b532e0b0e3c3dd\": container with ID starting with 402054f28cb636aaeff46c798035c70718bfb4e124d8a41627b532e0b0e3c3dd not found: ID does not exist" Nov 25 09:58:53 crc kubenswrapper[4769]: I1125 09:58:53.759238 4769 scope.go:117] "RemoveContainer" containerID="9759364875a645c683d41a69d90c9a2b07abf3c89eb37810fc46ead6c870843f" Nov 25 09:58:53 crc kubenswrapper[4769]: E1125 09:58:53.759679 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9759364875a645c683d41a69d90c9a2b07abf3c89eb37810fc46ead6c870843f\": container with ID starting with 9759364875a645c683d41a69d90c9a2b07abf3c89eb37810fc46ead6c870843f not found: ID does not exist" containerID="9759364875a645c683d41a69d90c9a2b07abf3c89eb37810fc46ead6c870843f" Nov 25 09:58:53 crc kubenswrapper[4769]: I1125 09:58:53.759753 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9759364875a645c683d41a69d90c9a2b07abf3c89eb37810fc46ead6c870843f"} err="failed to get container status \"9759364875a645c683d41a69d90c9a2b07abf3c89eb37810fc46ead6c870843f\": rpc error: code = NotFound desc = could not find container \"9759364875a645c683d41a69d90c9a2b07abf3c89eb37810fc46ead6c870843f\": container with ID starting with 9759364875a645c683d41a69d90c9a2b07abf3c89eb37810fc46ead6c870843f not found: ID does not exist" Nov 25 09:58:53 crc kubenswrapper[4769]: I1125 09:58:53.759798 4769 scope.go:117] "RemoveContainer" containerID="860fc263361e31a977e88aa0a3b458d89006aae381c7b2f9b4a0f7ea8acee153" Nov 25 09:58:53 crc kubenswrapper[4769]: E1125 09:58:53.760095 4769 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"860fc263361e31a977e88aa0a3b458d89006aae381c7b2f9b4a0f7ea8acee153\": container with ID starting with 860fc263361e31a977e88aa0a3b458d89006aae381c7b2f9b4a0f7ea8acee153 not found: ID does not exist" containerID="860fc263361e31a977e88aa0a3b458d89006aae381c7b2f9b4a0f7ea8acee153" Nov 25 09:58:53 crc kubenswrapper[4769]: I1125 09:58:53.760118 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"860fc263361e31a977e88aa0a3b458d89006aae381c7b2f9b4a0f7ea8acee153"} err="failed to get container status \"860fc263361e31a977e88aa0a3b458d89006aae381c7b2f9b4a0f7ea8acee153\": rpc error: code = NotFound desc = could not find container \"860fc263361e31a977e88aa0a3b458d89006aae381c7b2f9b4a0f7ea8acee153\": container with ID starting with 860fc263361e31a977e88aa0a3b458d89006aae381c7b2f9b4a0f7ea8acee153 not found: ID does not exist" Nov 25 09:58:54 crc kubenswrapper[4769]: I1125 09:58:54.247814 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fd33c731-bfb7-4848-a754-5d625ecf5bba" path="/var/lib/kubelet/pods/fd33c731-bfb7-4848-a754-5d625ecf5bba/volumes" Nov 25 09:59:21 crc kubenswrapper[4769]: I1125 09:59:21.963162 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772efcvc8"] Nov 25 09:59:21 crc kubenswrapper[4769]: E1125 09:59:21.964160 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd33c731-bfb7-4848-a754-5d625ecf5bba" containerName="registry-server" Nov 25 09:59:21 crc kubenswrapper[4769]: I1125 09:59:21.964174 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd33c731-bfb7-4848-a754-5d625ecf5bba" containerName="registry-server" Nov 25 09:59:21 crc kubenswrapper[4769]: E1125 09:59:21.964199 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd33c731-bfb7-4848-a754-5d625ecf5bba" containerName="extract-utilities" Nov 25 09:59:21 crc kubenswrapper[4769]: I1125 09:59:21.964208 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd33c731-bfb7-4848-a754-5d625ecf5bba" containerName="extract-utilities" Nov 25 09:59:21 crc kubenswrapper[4769]: E1125 09:59:21.964229 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd33c731-bfb7-4848-a754-5d625ecf5bba" containerName="extract-content" Nov 25 09:59:21 crc kubenswrapper[4769]: I1125 09:59:21.964236 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd33c731-bfb7-4848-a754-5d625ecf5bba" containerName="extract-content" Nov 25 09:59:21 crc kubenswrapper[4769]: I1125 09:59:21.964369 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd33c731-bfb7-4848-a754-5d625ecf5bba" containerName="registry-server" Nov 25 09:59:21 crc kubenswrapper[4769]: I1125 09:59:21.965597 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772efcvc8" Nov 25 09:59:21 crc kubenswrapper[4769]: I1125 09:59:21.968327 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 25 09:59:21 crc kubenswrapper[4769]: I1125 09:59:21.988709 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772efcvc8"] Nov 25 09:59:22 crc kubenswrapper[4769]: I1125 09:59:22.066107 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ntfv8\" (UniqueName: \"kubernetes.io/projected/5b4d5ade-9f98-49a6-a236-dada2b731c5a-kube-api-access-ntfv8\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772efcvc8\" (UID: \"5b4d5ade-9f98-49a6-a236-dada2b731c5a\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772efcvc8" Nov 25 09:59:22 crc kubenswrapper[4769]: I1125 09:59:22.066547 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/5b4d5ade-9f98-49a6-a236-dada2b731c5a-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772efcvc8\" (UID: \"5b4d5ade-9f98-49a6-a236-dada2b731c5a\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772efcvc8" Nov 25 09:59:22 crc kubenswrapper[4769]: I1125 09:59:22.066668 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/5b4d5ade-9f98-49a6-a236-dada2b731c5a-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772efcvc8\" (UID: \"5b4d5ade-9f98-49a6-a236-dada2b731c5a\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772efcvc8" Nov 25 09:59:22 crc kubenswrapper[4769]: I1125 09:59:22.168700 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/5b4d5ade-9f98-49a6-a236-dada2b731c5a-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772efcvc8\" (UID: \"5b4d5ade-9f98-49a6-a236-dada2b731c5a\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772efcvc8" Nov 25 09:59:22 crc kubenswrapper[4769]: I1125 09:59:22.168805 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ntfv8\" (UniqueName: \"kubernetes.io/projected/5b4d5ade-9f98-49a6-a236-dada2b731c5a-kube-api-access-ntfv8\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772efcvc8\" (UID: \"5b4d5ade-9f98-49a6-a236-dada2b731c5a\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772efcvc8" Nov 25 09:59:22 crc kubenswrapper[4769]: I1125 09:59:22.168892 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/5b4d5ade-9f98-49a6-a236-dada2b731c5a-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772efcvc8\" (UID: \"5b4d5ade-9f98-49a6-a236-dada2b731c5a\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772efcvc8" Nov 25 09:59:22 crc kubenswrapper[4769]: I1125 09:59:22.169338 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/5b4d5ade-9f98-49a6-a236-dada2b731c5a-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772efcvc8\" (UID: \"5b4d5ade-9f98-49a6-a236-dada2b731c5a\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772efcvc8" Nov 25 09:59:22 crc kubenswrapper[4769]: I1125 09:59:22.169421 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/5b4d5ade-9f98-49a6-a236-dada2b731c5a-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772efcvc8\" (UID: \"5b4d5ade-9f98-49a6-a236-dada2b731c5a\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772efcvc8" Nov 25 09:59:22 crc kubenswrapper[4769]: I1125 09:59:22.192887 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ntfv8\" (UniqueName: \"kubernetes.io/projected/5b4d5ade-9f98-49a6-a236-dada2b731c5a-kube-api-access-ntfv8\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772efcvc8\" (UID: \"5b4d5ade-9f98-49a6-a236-dada2b731c5a\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772efcvc8" Nov 25 09:59:22 crc kubenswrapper[4769]: I1125 09:59:22.285948 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772efcvc8" Nov 25 09:59:22 crc kubenswrapper[4769]: I1125 09:59:22.289946 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:59:22 crc kubenswrapper[4769]: I1125 09:59:22.290078 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:59:22 crc kubenswrapper[4769]: I1125 09:59:22.777155 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772efcvc8"] Nov 25 09:59:22 crc kubenswrapper[4769]: I1125 09:59:22.963012 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772efcvc8" event={"ID":"5b4d5ade-9f98-49a6-a236-dada2b731c5a","Type":"ContainerStarted","Data":"c8ea80432cedb4ea7a4f45faf74a2dcb4ae13b25b89c1f0ff76712ad2c5fb09e"} Nov 25 09:59:23 crc kubenswrapper[4769]: I1125 09:59:23.972130 4769 generic.go:334] "Generic (PLEG): container finished" podID="5b4d5ade-9f98-49a6-a236-dada2b731c5a" containerID="a6c67c0e27991a6f0683f57c8d48b86c9040522ca9a6b70541b9b54d302ecc9f" exitCode=0 Nov 25 09:59:23 crc kubenswrapper[4769]: I1125 09:59:23.972184 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772efcvc8" event={"ID":"5b4d5ade-9f98-49a6-a236-dada2b731c5a","Type":"ContainerDied","Data":"a6c67c0e27991a6f0683f57c8d48b86c9040522ca9a6b70541b9b54d302ecc9f"} Nov 25 09:59:25 crc kubenswrapper[4769]: I1125 09:59:25.996362 4769 generic.go:334] "Generic (PLEG): container finished" podID="5b4d5ade-9f98-49a6-a236-dada2b731c5a" 
containerID="313b0271f1509995a0d787d7a073df9d444a0c7db49fb0ceb7e81aef9425d88a" exitCode=0 Nov 25 09:59:25 crc kubenswrapper[4769]: I1125 09:59:25.996475 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772efcvc8" event={"ID":"5b4d5ade-9f98-49a6-a236-dada2b731c5a","Type":"ContainerDied","Data":"313b0271f1509995a0d787d7a073df9d444a0c7db49fb0ceb7e81aef9425d88a"} Nov 25 09:59:27 crc kubenswrapper[4769]: I1125 09:59:27.060329 4769 generic.go:334] "Generic (PLEG): container finished" podID="5b4d5ade-9f98-49a6-a236-dada2b731c5a" containerID="6922be2eca41354616a11570e5fc63cfcdb7251faa203362ef112ba85dcd1c00" exitCode=0 Nov 25 09:59:27 crc kubenswrapper[4769]: I1125 09:59:27.060747 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772efcvc8" event={"ID":"5b4d5ade-9f98-49a6-a236-dada2b731c5a","Type":"ContainerDied","Data":"6922be2eca41354616a11570e5fc63cfcdb7251faa203362ef112ba85dcd1c00"} Nov 25 09:59:28 crc kubenswrapper[4769]: I1125 09:59:28.390008 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772efcvc8" Nov 25 09:59:28 crc kubenswrapper[4769]: I1125 09:59:28.503781 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ntfv8\" (UniqueName: \"kubernetes.io/projected/5b4d5ade-9f98-49a6-a236-dada2b731c5a-kube-api-access-ntfv8\") pod \"5b4d5ade-9f98-49a6-a236-dada2b731c5a\" (UID: \"5b4d5ade-9f98-49a6-a236-dada2b731c5a\") " Nov 25 09:59:28 crc kubenswrapper[4769]: I1125 09:59:28.503837 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/5b4d5ade-9f98-49a6-a236-dada2b731c5a-util\") pod \"5b4d5ade-9f98-49a6-a236-dada2b731c5a\" (UID: \"5b4d5ade-9f98-49a6-a236-dada2b731c5a\") " Nov 25 09:59:28 crc kubenswrapper[4769]: I1125 09:59:28.503946 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/5b4d5ade-9f98-49a6-a236-dada2b731c5a-bundle\") pod \"5b4d5ade-9f98-49a6-a236-dada2b731c5a\" (UID: \"5b4d5ade-9f98-49a6-a236-dada2b731c5a\") " Nov 25 09:59:28 crc kubenswrapper[4769]: I1125 09:59:28.504875 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5b4d5ade-9f98-49a6-a236-dada2b731c5a-bundle" (OuterVolumeSpecName: "bundle") pod "5b4d5ade-9f98-49a6-a236-dada2b731c5a" (UID: "5b4d5ade-9f98-49a6-a236-dada2b731c5a"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:59:28 crc kubenswrapper[4769]: I1125 09:59:28.520196 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b4d5ade-9f98-49a6-a236-dada2b731c5a-kube-api-access-ntfv8" (OuterVolumeSpecName: "kube-api-access-ntfv8") pod "5b4d5ade-9f98-49a6-a236-dada2b731c5a" (UID: "5b4d5ade-9f98-49a6-a236-dada2b731c5a"). InnerVolumeSpecName "kube-api-access-ntfv8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:59:28 crc kubenswrapper[4769]: I1125 09:59:28.606156 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ntfv8\" (UniqueName: \"kubernetes.io/projected/5b4d5ade-9f98-49a6-a236-dada2b731c5a-kube-api-access-ntfv8\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:28 crc kubenswrapper[4769]: I1125 09:59:28.606202 4769 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/5b4d5ade-9f98-49a6-a236-dada2b731c5a-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:28 crc kubenswrapper[4769]: I1125 09:59:28.631388 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5b4d5ade-9f98-49a6-a236-dada2b731c5a-util" (OuterVolumeSpecName: "util") pod "5b4d5ade-9f98-49a6-a236-dada2b731c5a" (UID: "5b4d5ade-9f98-49a6-a236-dada2b731c5a"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:59:28 crc kubenswrapper[4769]: I1125 09:59:28.708165 4769 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/5b4d5ade-9f98-49a6-a236-dada2b731c5a-util\") on node \"crc\" DevicePath \"\"" Nov 25 09:59:29 crc kubenswrapper[4769]: I1125 09:59:29.079741 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772efcvc8" event={"ID":"5b4d5ade-9f98-49a6-a236-dada2b731c5a","Type":"ContainerDied","Data":"c8ea80432cedb4ea7a4f45faf74a2dcb4ae13b25b89c1f0ff76712ad2c5fb09e"} Nov 25 09:59:29 crc kubenswrapper[4769]: I1125 09:59:29.080116 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c8ea80432cedb4ea7a4f45faf74a2dcb4ae13b25b89c1f0ff76712ad2c5fb09e" Nov 25 09:59:29 crc kubenswrapper[4769]: I1125 09:59:29.079811 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772efcvc8" Nov 25 09:59:33 crc kubenswrapper[4769]: I1125 09:59:33.358939 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-fqxhf"] Nov 25 09:59:33 crc kubenswrapper[4769]: E1125 09:59:33.359887 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b4d5ade-9f98-49a6-a236-dada2b731c5a" containerName="util" Nov 25 09:59:33 crc kubenswrapper[4769]: I1125 09:59:33.359919 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b4d5ade-9f98-49a6-a236-dada2b731c5a" containerName="util" Nov 25 09:59:33 crc kubenswrapper[4769]: E1125 09:59:33.359933 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b4d5ade-9f98-49a6-a236-dada2b731c5a" containerName="extract" Nov 25 09:59:33 crc kubenswrapper[4769]: I1125 09:59:33.359944 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b4d5ade-9f98-49a6-a236-dada2b731c5a" containerName="extract" Nov 25 09:59:33 crc kubenswrapper[4769]: E1125 09:59:33.359960 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b4d5ade-9f98-49a6-a236-dada2b731c5a" containerName="pull" Nov 25 09:59:33 crc kubenswrapper[4769]: I1125 09:59:33.359996 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b4d5ade-9f98-49a6-a236-dada2b731c5a" containerName="pull" Nov 25 09:59:33 crc kubenswrapper[4769]: I1125 09:59:33.360252 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b4d5ade-9f98-49a6-a236-dada2b731c5a" containerName="extract" Nov 25 09:59:33 crc kubenswrapper[4769]: I1125 09:59:33.361558 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-fqxhf" Nov 25 09:59:33 crc kubenswrapper[4769]: I1125 09:59:33.364655 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-j5zw9" Nov 25 09:59:33 crc kubenswrapper[4769]: I1125 09:59:33.365214 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Nov 25 09:59:33 crc kubenswrapper[4769]: I1125 09:59:33.365461 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Nov 25 09:59:33 crc kubenswrapper[4769]: I1125 09:59:33.383057 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-fqxhf"] Nov 25 09:59:33 crc kubenswrapper[4769]: I1125 09:59:33.398226 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n5cpf\" (UniqueName: \"kubernetes.io/projected/6e6ef328-24f7-4e03-a863-e369abbf53e0-kube-api-access-n5cpf\") pod \"nmstate-operator-557fdffb88-fqxhf\" (UID: \"6e6ef328-24f7-4e03-a863-e369abbf53e0\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-fqxhf" Nov 25 09:59:33 crc kubenswrapper[4769]: I1125 09:59:33.502566 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n5cpf\" (UniqueName: \"kubernetes.io/projected/6e6ef328-24f7-4e03-a863-e369abbf53e0-kube-api-access-n5cpf\") pod \"nmstate-operator-557fdffb88-fqxhf\" (UID: \"6e6ef328-24f7-4e03-a863-e369abbf53e0\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-fqxhf" Nov 25 09:59:33 crc kubenswrapper[4769]: I1125 09:59:33.527357 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n5cpf\" 
(UniqueName: \"kubernetes.io/projected/6e6ef328-24f7-4e03-a863-e369abbf53e0-kube-api-access-n5cpf\") pod \"nmstate-operator-557fdffb88-fqxhf\" (UID: \"6e6ef328-24f7-4e03-a863-e369abbf53e0\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-fqxhf" Nov 25 09:59:33 crc kubenswrapper[4769]: I1125 09:59:33.701552 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-fqxhf" Nov 25 09:59:34 crc kubenswrapper[4769]: I1125 09:59:34.320642 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-fqxhf"] Nov 25 09:59:35 crc kubenswrapper[4769]: I1125 09:59:35.141887 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-fqxhf" event={"ID":"6e6ef328-24f7-4e03-a863-e369abbf53e0","Type":"ContainerStarted","Data":"873f82334e6aa9e48db9d95e5ae2efa2b6235f06cf9d677d5d32626ec15ff0f9"} Nov 25 09:59:37 crc kubenswrapper[4769]: I1125 09:59:37.160839 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-fqxhf" event={"ID":"6e6ef328-24f7-4e03-a863-e369abbf53e0","Type":"ContainerStarted","Data":"17eb6f777c07ebffcb2d60733e9d35eb0ab4d8266b4ca3dc801345aaa1fe6d9e"} Nov 25 09:59:37 crc kubenswrapper[4769]: I1125 09:59:37.185937 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-557fdffb88-fqxhf" podStartSLOduration=1.576219274 podStartE2EDuration="4.185916638s" podCreationTimestamp="2025-11-25 09:59:33 +0000 UTC" firstStartedPulling="2025-11-25 09:59:34.337046826 +0000 UTC m=+922.922019139" lastFinishedPulling="2025-11-25 09:59:36.94674419 +0000 UTC m=+925.531716503" observedRunningTime="2025-11-25 09:59:37.179735219 +0000 UTC m=+925.764707532" watchObservedRunningTime="2025-11-25 09:59:37.185916638 +0000 UTC m=+925.770888951" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.274686 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-fvq2h"] Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.276857 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-fvq2h" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.279866 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-dnsfl" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.291868 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-fvq2h"] Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.302124 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-zljjp"] Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.303411 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4q2fc\" (UniqueName: \"kubernetes.io/projected/ee0bb8ab-c782-49d9-81eb-c2c0a6a8ce52-kube-api-access-4q2fc\") pod \"nmstate-metrics-5dcf9c57c5-fvq2h\" (UID: \"ee0bb8ab-c782-49d9-81eb-c2c0a6a8ce52\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-fvq2h" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.303514 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-zljjp" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.306717 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.345497 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-zljjp"] Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.354700 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-9ctgr"] Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.356918 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-9ctgr" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.406479 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/1b7e501c-cb93-4182-a15f-70db4cb62704-nmstate-lock\") pod \"nmstate-handler-9ctgr\" (UID: \"1b7e501c-cb93-4182-a15f-70db4cb62704\") " pod="openshift-nmstate/nmstate-handler-9ctgr" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.406566 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4q2fc\" (UniqueName: \"kubernetes.io/projected/ee0bb8ab-c782-49d9-81eb-c2c0a6a8ce52-kube-api-access-4q2fc\") pod \"nmstate-metrics-5dcf9c57c5-fvq2h\" (UID: \"ee0bb8ab-c782-49d9-81eb-c2c0a6a8ce52\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-fvq2h" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.406604 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/1b7e501c-cb93-4182-a15f-70db4cb62704-dbus-socket\") pod \"nmstate-handler-9ctgr\" (UID: \"1b7e501c-cb93-4182-a15f-70db4cb62704\") " pod="openshift-nmstate/nmstate-handler-9ctgr" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.406648 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/1b7e501c-cb93-4182-a15f-70db4cb62704-ovs-socket\") pod \"nmstate-handler-9ctgr\" (UID: \"1b7e501c-cb93-4182-a15f-70db4cb62704\") " pod="openshift-nmstate/nmstate-handler-9ctgr" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.406700 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8wfzs\" (UniqueName: \"kubernetes.io/projected/1b7e501c-cb93-4182-a15f-70db4cb62704-kube-api-access-8wfzs\") pod \"nmstate-handler-9ctgr\" (UID: \"1b7e501c-cb93-4182-a15f-70db4cb62704\") " pod="openshift-nmstate/nmstate-handler-9ctgr" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.406735 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jqg9h\" (UniqueName: \"kubernetes.io/projected/467be990-d3c4-447c-a994-9b7caf5c0b44-kube-api-access-jqg9h\") pod \"nmstate-webhook-6b89b748d8-zljjp\" (UID: \"467be990-d3c4-447c-a994-9b7caf5c0b44\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-zljjp" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.406772 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/467be990-d3c4-447c-a994-9b7caf5c0b44-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-zljjp\" 
(UID: \"467be990-d3c4-447c-a994-9b7caf5c0b44\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-zljjp" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.432571 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4q2fc\" (UniqueName: \"kubernetes.io/projected/ee0bb8ab-c782-49d9-81eb-c2c0a6a8ce52-kube-api-access-4q2fc\") pod \"nmstate-metrics-5dcf9c57c5-fvq2h\" (UID: \"ee0bb8ab-c782-49d9-81eb-c2c0a6a8ce52\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-fvq2h" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.476887 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-cfwf7"] Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.477998 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-cfwf7" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.480435 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.480725 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-d89x5" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.489087 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-cfwf7"] Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.492122 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.508303 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/467be990-d3c4-447c-a994-9b7caf5c0b44-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-zljjp\" (UID: \"467be990-d3c4-447c-a994-9b7caf5c0b44\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-zljjp" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.508396 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g78fx\" (UniqueName: \"kubernetes.io/projected/b3842e94-b875-49e0-9041-51a64f4c496d-kube-api-access-g78fx\") pod \"nmstate-console-plugin-5874bd7bc5-cfwf7\" (UID: \"b3842e94-b875-49e0-9041-51a64f4c496d\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-cfwf7" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.508452 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/b3842e94-b875-49e0-9041-51a64f4c496d-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-cfwf7\" (UID: \"b3842e94-b875-49e0-9041-51a64f4c496d\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-cfwf7" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.508477 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/1b7e501c-cb93-4182-a15f-70db4cb62704-nmstate-lock\") pod \"nmstate-handler-9ctgr\" (UID: \"1b7e501c-cb93-4182-a15f-70db4cb62704\") " pod="openshift-nmstate/nmstate-handler-9ctgr" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.508512 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/1b7e501c-cb93-4182-a15f-70db4cb62704-dbus-socket\") pod \"nmstate-handler-9ctgr\" 
(UID: \"1b7e501c-cb93-4182-a15f-70db4cb62704\") " pod="openshift-nmstate/nmstate-handler-9ctgr" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.508536 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/b3842e94-b875-49e0-9041-51a64f4c496d-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-cfwf7\" (UID: \"b3842e94-b875-49e0-9041-51a64f4c496d\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-cfwf7" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.508568 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/1b7e501c-cb93-4182-a15f-70db4cb62704-ovs-socket\") pod \"nmstate-handler-9ctgr\" (UID: \"1b7e501c-cb93-4182-a15f-70db4cb62704\") " pod="openshift-nmstate/nmstate-handler-9ctgr" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.508628 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8wfzs\" (UniqueName: \"kubernetes.io/projected/1b7e501c-cb93-4182-a15f-70db4cb62704-kube-api-access-8wfzs\") pod \"nmstate-handler-9ctgr\" (UID: \"1b7e501c-cb93-4182-a15f-70db4cb62704\") " pod="openshift-nmstate/nmstate-handler-9ctgr" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.508657 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jqg9h\" (UniqueName: \"kubernetes.io/projected/467be990-d3c4-447c-a994-9b7caf5c0b44-kube-api-access-jqg9h\") pod \"nmstate-webhook-6b89b748d8-zljjp\" (UID: \"467be990-d3c4-447c-a994-9b7caf5c0b44\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-zljjp" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.510345 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/1b7e501c-cb93-4182-a15f-70db4cb62704-dbus-socket\") pod \"nmstate-handler-9ctgr\" (UID: \"1b7e501c-cb93-4182-a15f-70db4cb62704\") " pod="openshift-nmstate/nmstate-handler-9ctgr" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.510442 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/1b7e501c-cb93-4182-a15f-70db4cb62704-ovs-socket\") pod \"nmstate-handler-9ctgr\" (UID: \"1b7e501c-cb93-4182-a15f-70db4cb62704\") " pod="openshift-nmstate/nmstate-handler-9ctgr" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.510662 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/1b7e501c-cb93-4182-a15f-70db4cb62704-nmstate-lock\") pod \"nmstate-handler-9ctgr\" (UID: \"1b7e501c-cb93-4182-a15f-70db4cb62704\") " pod="openshift-nmstate/nmstate-handler-9ctgr" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.516342 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/467be990-d3c4-447c-a994-9b7caf5c0b44-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-zljjp\" (UID: \"467be990-d3c4-447c-a994-9b7caf5c0b44\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-zljjp" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.533016 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8wfzs\" (UniqueName: \"kubernetes.io/projected/1b7e501c-cb93-4182-a15f-70db4cb62704-kube-api-access-8wfzs\") pod \"nmstate-handler-9ctgr\" (UID: \"1b7e501c-cb93-4182-a15f-70db4cb62704\") 
" pod="openshift-nmstate/nmstate-handler-9ctgr" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.533288 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jqg9h\" (UniqueName: \"kubernetes.io/projected/467be990-d3c4-447c-a994-9b7caf5c0b44-kube-api-access-jqg9h\") pod \"nmstate-webhook-6b89b748d8-zljjp\" (UID: \"467be990-d3c4-447c-a994-9b7caf5c0b44\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-zljjp" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.605107 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-fvq2h" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.611118 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g78fx\" (UniqueName: \"kubernetes.io/projected/b3842e94-b875-49e0-9041-51a64f4c496d-kube-api-access-g78fx\") pod \"nmstate-console-plugin-5874bd7bc5-cfwf7\" (UID: \"b3842e94-b875-49e0-9041-51a64f4c496d\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-cfwf7" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.611176 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/b3842e94-b875-49e0-9041-51a64f4c496d-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-cfwf7\" (UID: \"b3842e94-b875-49e0-9041-51a64f4c496d\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-cfwf7" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.611212 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/b3842e94-b875-49e0-9041-51a64f4c496d-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-cfwf7\" (UID: \"b3842e94-b875-49e0-9041-51a64f4c496d\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-cfwf7" Nov 25 09:59:43 crc kubenswrapper[4769]: E1125 09:59:43.611480 4769 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found Nov 25 09:59:43 crc kubenswrapper[4769]: E1125 09:59:43.611631 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b3842e94-b875-49e0-9041-51a64f4c496d-plugin-serving-cert podName:b3842e94-b875-49e0-9041-51a64f4c496d nodeName:}" failed. No retries permitted until 2025-11-25 09:59:44.111605252 +0000 UTC m=+932.696577565 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/b3842e94-b875-49e0-9041-51a64f4c496d-plugin-serving-cert") pod "nmstate-console-plugin-5874bd7bc5-cfwf7" (UID: "b3842e94-b875-49e0-9041-51a64f4c496d") : secret "plugin-serving-cert" not found Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.612075 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/b3842e94-b875-49e0-9041-51a64f4c496d-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-cfwf7\" (UID: \"b3842e94-b875-49e0-9041-51a64f4c496d\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-cfwf7" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.618064 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-zljjp" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.641389 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g78fx\" (UniqueName: \"kubernetes.io/projected/b3842e94-b875-49e0-9041-51a64f4c496d-kube-api-access-g78fx\") pod \"nmstate-console-plugin-5874bd7bc5-cfwf7\" (UID: \"b3842e94-b875-49e0-9041-51a64f4c496d\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-cfwf7" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.683490 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-9ctgr" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.722687 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-5b87c94878-dctn4"] Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.723976 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-5b87c94878-dctn4" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.748662 4769 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.769277 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-5b87c94878-dctn4"] Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.819503 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d-service-ca\") pod \"console-5b87c94878-dctn4\" (UID: \"bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d\") " pod="openshift-console/console-5b87c94878-dctn4" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.819560 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d-console-config\") pod \"console-5b87c94878-dctn4\" (UID: \"bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d\") " pod="openshift-console/console-5b87c94878-dctn4" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.819699 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d-trusted-ca-bundle\") pod \"console-5b87c94878-dctn4\" (UID: \"bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d\") " pod="openshift-console/console-5b87c94878-dctn4" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.819751 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d-console-serving-cert\") pod \"console-5b87c94878-dctn4\" (UID: \"bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d\") " pod="openshift-console/console-5b87c94878-dctn4" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.819779 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d-console-oauth-config\") pod \"console-5b87c94878-dctn4\" (UID: \"bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d\") " pod="openshift-console/console-5b87c94878-dctn4" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.819989 4769 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d-oauth-serving-cert\") pod \"console-5b87c94878-dctn4\" (UID: \"bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d\") " pod="openshift-console/console-5b87c94878-dctn4" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.820013 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-slr5v\" (UniqueName: \"kubernetes.io/projected/bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d-kube-api-access-slr5v\") pod \"console-5b87c94878-dctn4\" (UID: \"bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d\") " pod="openshift-console/console-5b87c94878-dctn4" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.925752 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d-trusted-ca-bundle\") pod \"console-5b87c94878-dctn4\" (UID: \"bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d\") " pod="openshift-console/console-5b87c94878-dctn4" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.925869 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d-console-serving-cert\") pod \"console-5b87c94878-dctn4\" (UID: \"bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d\") " pod="openshift-console/console-5b87c94878-dctn4" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.925895 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d-console-oauth-config\") pod \"console-5b87c94878-dctn4\" (UID: \"bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d\") " pod="openshift-console/console-5b87c94878-dctn4" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.925930 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d-oauth-serving-cert\") pod \"console-5b87c94878-dctn4\" (UID: \"bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d\") " pod="openshift-console/console-5b87c94878-dctn4" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.925947 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-slr5v\" (UniqueName: \"kubernetes.io/projected/bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d-kube-api-access-slr5v\") pod \"console-5b87c94878-dctn4\" (UID: \"bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d\") " pod="openshift-console/console-5b87c94878-dctn4" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.926034 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d-service-ca\") pod \"console-5b87c94878-dctn4\" (UID: \"bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d\") " pod="openshift-console/console-5b87c94878-dctn4" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.926068 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d-console-config\") pod \"console-5b87c94878-dctn4\" (UID: \"bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d\") " pod="openshift-console/console-5b87c94878-dctn4" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 
09:59:43.927332 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d-trusted-ca-bundle\") pod \"console-5b87c94878-dctn4\" (UID: \"bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d\") " pod="openshift-console/console-5b87c94878-dctn4" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.927371 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d-oauth-serving-cert\") pod \"console-5b87c94878-dctn4\" (UID: \"bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d\") " pod="openshift-console/console-5b87c94878-dctn4" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.927364 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d-console-config\") pod \"console-5b87c94878-dctn4\" (UID: \"bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d\") " pod="openshift-console/console-5b87c94878-dctn4" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.928239 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d-service-ca\") pod \"console-5b87c94878-dctn4\" (UID: \"bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d\") " pod="openshift-console/console-5b87c94878-dctn4" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.931887 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d-console-serving-cert\") pod \"console-5b87c94878-dctn4\" (UID: \"bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d\") " pod="openshift-console/console-5b87c94878-dctn4" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.933833 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d-console-oauth-config\") pod \"console-5b87c94878-dctn4\" (UID: \"bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d\") " pod="openshift-console/console-5b87c94878-dctn4" Nov 25 09:59:43 crc kubenswrapper[4769]: I1125 09:59:43.949222 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-slr5v\" (UniqueName: \"kubernetes.io/projected/bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d-kube-api-access-slr5v\") pod \"console-5b87c94878-dctn4\" (UID: \"bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d\") " pod="openshift-console/console-5b87c94878-dctn4" Nov 25 09:59:44 crc kubenswrapper[4769]: I1125 09:59:44.064562 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-5b87c94878-dctn4" Nov 25 09:59:44 crc kubenswrapper[4769]: I1125 09:59:44.140781 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/b3842e94-b875-49e0-9041-51a64f4c496d-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-cfwf7\" (UID: \"b3842e94-b875-49e0-9041-51a64f4c496d\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-cfwf7" Nov 25 09:59:44 crc kubenswrapper[4769]: I1125 09:59:44.145903 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/b3842e94-b875-49e0-9041-51a64f4c496d-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-cfwf7\" (UID: \"b3842e94-b875-49e0-9041-51a64f4c496d\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-cfwf7" Nov 25 09:59:44 crc kubenswrapper[4769]: I1125 09:59:44.248258 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-9ctgr" event={"ID":"1b7e501c-cb93-4182-a15f-70db4cb62704","Type":"ContainerStarted","Data":"d81ff3fc305cc373a30ea4dfa521a69ad0be5d2f0fc3317a9763bc8091505b94"} Nov 25 09:59:44 crc kubenswrapper[4769]: I1125 09:59:44.248305 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-zljjp"] Nov 25 09:59:44 crc kubenswrapper[4769]: I1125 09:59:44.311951 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-fvq2h"] Nov 25 09:59:44 crc kubenswrapper[4769]: I1125 09:59:44.399739 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-cfwf7" Nov 25 09:59:44 crc kubenswrapper[4769]: I1125 09:59:44.561932 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-5b87c94878-dctn4"] Nov 25 09:59:44 crc kubenswrapper[4769]: W1125 09:59:44.579237 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbb18d6f0_fbf5_4f14_8677_2cbf4a27ac4d.slice/crio-12b1401610ff3b450bfa7ec5f8a76f7090d14954e9e2bbd327fd278eb46b14c2 WatchSource:0}: Error finding container 12b1401610ff3b450bfa7ec5f8a76f7090d14954e9e2bbd327fd278eb46b14c2: Status 404 returned error can't find the container with id 12b1401610ff3b450bfa7ec5f8a76f7090d14954e9e2bbd327fd278eb46b14c2 Nov 25 09:59:44 crc kubenswrapper[4769]: I1125 09:59:44.852605 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-cfwf7"] Nov 25 09:59:45 crc kubenswrapper[4769]: I1125 09:59:45.251824 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-cfwf7" event={"ID":"b3842e94-b875-49e0-9041-51a64f4c496d","Type":"ContainerStarted","Data":"be9e78a244aa080207f9557c16503ac67fc87637da69768974b3a88c7762df97"} Nov 25 09:59:45 crc kubenswrapper[4769]: I1125 09:59:45.253870 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-zljjp" event={"ID":"467be990-d3c4-447c-a994-9b7caf5c0b44","Type":"ContainerStarted","Data":"2da71dc68a03c395e01ae3492d3fff9e1030ef9931fe2252489cd66da44f75fc"} Nov 25 09:59:45 crc kubenswrapper[4769]: I1125 09:59:45.256308 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5b87c94878-dctn4" 
event={"ID":"bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d","Type":"ContainerStarted","Data":"cf7e8896f13a63165b978bb5c787e4d3db2b033bb5fbf646b2f283f9dd5c25e5"} Nov 25 09:59:45 crc kubenswrapper[4769]: I1125 09:59:45.256379 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5b87c94878-dctn4" event={"ID":"bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d","Type":"ContainerStarted","Data":"12b1401610ff3b450bfa7ec5f8a76f7090d14954e9e2bbd327fd278eb46b14c2"} Nov 25 09:59:45 crc kubenswrapper[4769]: I1125 09:59:45.257669 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-fvq2h" event={"ID":"ee0bb8ab-c782-49d9-81eb-c2c0a6a8ce52","Type":"ContainerStarted","Data":"0ae1b4a9abc2feda9950c64cbc534ca85a014859a93ba2c28e13989677b69d89"} Nov 25 09:59:45 crc kubenswrapper[4769]: I1125 09:59:45.283768 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-5b87c94878-dctn4" podStartSLOduration=2.283739061 podStartE2EDuration="2.283739061s" podCreationTimestamp="2025-11-25 09:59:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:59:45.282296651 +0000 UTC m=+933.867268994" watchObservedRunningTime="2025-11-25 09:59:45.283739061 +0000 UTC m=+933.868711394" Nov 25 09:59:47 crc kubenswrapper[4769]: I1125 09:59:47.483007 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-9ctgr" event={"ID":"1b7e501c-cb93-4182-a15f-70db4cb62704","Type":"ContainerStarted","Data":"04487f403cd1efcfc1c73eddcb9d65464e773ecc27964837408b3561716586fc"} Nov 25 09:59:47 crc kubenswrapper[4769]: I1125 09:59:47.483715 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-9ctgr" Nov 25 09:59:47 crc kubenswrapper[4769]: I1125 09:59:47.486587 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-zljjp" event={"ID":"467be990-d3c4-447c-a994-9b7caf5c0b44","Type":"ContainerStarted","Data":"f7b95ed723f478c1ba39d556c06688915ac463556bf2ac46fa0b69be1d8fc280"} Nov 25 09:59:47 crc kubenswrapper[4769]: I1125 09:59:47.490634 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-fvq2h" event={"ID":"ee0bb8ab-c782-49d9-81eb-c2c0a6a8ce52","Type":"ContainerStarted","Data":"442f9c99f6b9f01d0fa2bf315dbf28665d851a6fadc0be7507d5c0256b5753f3"} Nov 25 09:59:47 crc kubenswrapper[4769]: I1125 09:59:47.504444 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-9ctgr" podStartSLOduration=1.4975630340000001 podStartE2EDuration="4.504422978s" podCreationTimestamp="2025-11-25 09:59:43 +0000 UTC" firstStartedPulling="2025-11-25 09:59:43.748392673 +0000 UTC m=+932.333364986" lastFinishedPulling="2025-11-25 09:59:46.755252617 +0000 UTC m=+935.340224930" observedRunningTime="2025-11-25 09:59:47.502138046 +0000 UTC m=+936.087110389" watchObservedRunningTime="2025-11-25 09:59:47.504422978 +0000 UTC m=+936.089395301" Nov 25 09:59:47 crc kubenswrapper[4769]: I1125 09:59:47.523732 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-zljjp" podStartSLOduration=2.007678461 podStartE2EDuration="4.523709737s" podCreationTimestamp="2025-11-25 09:59:43 +0000 UTC" firstStartedPulling="2025-11-25 09:59:44.251504618 +0000 UTC m=+932.836476931" 
lastFinishedPulling="2025-11-25 09:59:46.767535884 +0000 UTC m=+935.352508207" observedRunningTime="2025-11-25 09:59:47.519204594 +0000 UTC m=+936.104176917" watchObservedRunningTime="2025-11-25 09:59:47.523709737 +0000 UTC m=+936.108682060" Nov 25 09:59:48 crc kubenswrapper[4769]: I1125 09:59:48.499584 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-zljjp" Nov 25 09:59:49 crc kubenswrapper[4769]: I1125 09:59:49.509035 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-cfwf7" event={"ID":"b3842e94-b875-49e0-9041-51a64f4c496d","Type":"ContainerStarted","Data":"f15bac760d832e13d75d2006fcd48dfd6838aa023ee73bb01155383cc2b6e6f7"} Nov 25 09:59:49 crc kubenswrapper[4769]: I1125 09:59:49.527758 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-cfwf7" podStartSLOduration=3.035962595 podStartE2EDuration="6.527735885s" podCreationTimestamp="2025-11-25 09:59:43 +0000 UTC" firstStartedPulling="2025-11-25 09:59:44.868084734 +0000 UTC m=+933.453057047" lastFinishedPulling="2025-11-25 09:59:48.359858024 +0000 UTC m=+936.944830337" observedRunningTime="2025-11-25 09:59:49.526429889 +0000 UTC m=+938.111402222" watchObservedRunningTime="2025-11-25 09:59:49.527735885 +0000 UTC m=+938.112708198" Nov 25 09:59:50 crc kubenswrapper[4769]: I1125 09:59:50.521015 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-fvq2h" event={"ID":"ee0bb8ab-c782-49d9-81eb-c2c0a6a8ce52","Type":"ContainerStarted","Data":"d1d905c92e61a4b7c99e584beb9c40c8121b23a14218eadd87ceb6e98f41d994"} Nov 25 09:59:50 crc kubenswrapper[4769]: I1125 09:59:50.545046 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-fvq2h" podStartSLOduration=2.106437419 podStartE2EDuration="7.545026698s" podCreationTimestamp="2025-11-25 09:59:43 +0000 UTC" firstStartedPulling="2025-11-25 09:59:44.325657061 +0000 UTC m=+932.910629374" lastFinishedPulling="2025-11-25 09:59:49.76424634 +0000 UTC m=+938.349218653" observedRunningTime="2025-11-25 09:59:50.540596806 +0000 UTC m=+939.125569119" watchObservedRunningTime="2025-11-25 09:59:50.545026698 +0000 UTC m=+939.129999011" Nov 25 09:59:52 crc kubenswrapper[4769]: I1125 09:59:52.291061 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:59:52 crc kubenswrapper[4769]: I1125 09:59:52.291180 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:59:53 crc kubenswrapper[4769]: I1125 09:59:53.710937 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-9ctgr" Nov 25 09:59:54 crc kubenswrapper[4769]: I1125 09:59:54.066776 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-5b87c94878-dctn4" Nov 25 09:59:54 crc kubenswrapper[4769]: I1125 09:59:54.067686 4769 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-5b87c94878-dctn4" Nov 25 09:59:54 crc kubenswrapper[4769]: I1125 09:59:54.072880 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-5b87c94878-dctn4" Nov 25 09:59:54 crc kubenswrapper[4769]: I1125 09:59:54.569195 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-5b87c94878-dctn4" Nov 25 09:59:54 crc kubenswrapper[4769]: I1125 09:59:54.641108 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-78789f5b5d-sxnbq"] Nov 25 10:00:00 crc kubenswrapper[4769]: I1125 10:00:00.157266 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401080-9ntlv"] Nov 25 10:00:00 crc kubenswrapper[4769]: I1125 10:00:00.159433 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-9ntlv" Nov 25 10:00:00 crc kubenswrapper[4769]: I1125 10:00:00.162110 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 10:00:00 crc kubenswrapper[4769]: I1125 10:00:00.162413 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 10:00:00 crc kubenswrapper[4769]: I1125 10:00:00.173262 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401080-9ntlv"] Nov 25 10:00:00 crc kubenswrapper[4769]: I1125 10:00:00.236428 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3ebbb140-bc0e-47b2-9d3e-b8db0f609729-secret-volume\") pod \"collect-profiles-29401080-9ntlv\" (UID: \"3ebbb140-bc0e-47b2-9d3e-b8db0f609729\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-9ntlv" Nov 25 10:00:00 crc kubenswrapper[4769]: I1125 10:00:00.236582 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bhvpq\" (UniqueName: \"kubernetes.io/projected/3ebbb140-bc0e-47b2-9d3e-b8db0f609729-kube-api-access-bhvpq\") pod \"collect-profiles-29401080-9ntlv\" (UID: \"3ebbb140-bc0e-47b2-9d3e-b8db0f609729\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-9ntlv" Nov 25 10:00:00 crc kubenswrapper[4769]: I1125 10:00:00.238127 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3ebbb140-bc0e-47b2-9d3e-b8db0f609729-config-volume\") pod \"collect-profiles-29401080-9ntlv\" (UID: \"3ebbb140-bc0e-47b2-9d3e-b8db0f609729\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-9ntlv" Nov 25 10:00:00 crc kubenswrapper[4769]: I1125 10:00:00.340884 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3ebbb140-bc0e-47b2-9d3e-b8db0f609729-secret-volume\") pod \"collect-profiles-29401080-9ntlv\" (UID: \"3ebbb140-bc0e-47b2-9d3e-b8db0f609729\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-9ntlv" Nov 25 10:00:00 crc kubenswrapper[4769]: I1125 10:00:00.341046 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-bhvpq\" (UniqueName: \"kubernetes.io/projected/3ebbb140-bc0e-47b2-9d3e-b8db0f609729-kube-api-access-bhvpq\") pod \"collect-profiles-29401080-9ntlv\" (UID: \"3ebbb140-bc0e-47b2-9d3e-b8db0f609729\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-9ntlv" Nov 25 10:00:00 crc kubenswrapper[4769]: I1125 10:00:00.341386 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3ebbb140-bc0e-47b2-9d3e-b8db0f609729-config-volume\") pod \"collect-profiles-29401080-9ntlv\" (UID: \"3ebbb140-bc0e-47b2-9d3e-b8db0f609729\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-9ntlv" Nov 25 10:00:00 crc kubenswrapper[4769]: I1125 10:00:00.343535 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3ebbb140-bc0e-47b2-9d3e-b8db0f609729-config-volume\") pod \"collect-profiles-29401080-9ntlv\" (UID: \"3ebbb140-bc0e-47b2-9d3e-b8db0f609729\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-9ntlv" Nov 25 10:00:00 crc kubenswrapper[4769]: I1125 10:00:00.349705 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3ebbb140-bc0e-47b2-9d3e-b8db0f609729-secret-volume\") pod \"collect-profiles-29401080-9ntlv\" (UID: \"3ebbb140-bc0e-47b2-9d3e-b8db0f609729\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-9ntlv" Nov 25 10:00:00 crc kubenswrapper[4769]: I1125 10:00:00.363810 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bhvpq\" (UniqueName: \"kubernetes.io/projected/3ebbb140-bc0e-47b2-9d3e-b8db0f609729-kube-api-access-bhvpq\") pod \"collect-profiles-29401080-9ntlv\" (UID: \"3ebbb140-bc0e-47b2-9d3e-b8db0f609729\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-9ntlv" Nov 25 10:00:00 crc kubenswrapper[4769]: I1125 10:00:00.497019 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-9ntlv" Nov 25 10:00:00 crc kubenswrapper[4769]: I1125 10:00:00.976911 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401080-9ntlv"] Nov 25 10:00:00 crc kubenswrapper[4769]: W1125 10:00:00.985512 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3ebbb140_bc0e_47b2_9d3e_b8db0f609729.slice/crio-bb26ef407df8d3e2134d957f0e68096888a8aacbc1f3333a5f1c44129a430afa WatchSource:0}: Error finding container bb26ef407df8d3e2134d957f0e68096888a8aacbc1f3333a5f1c44129a430afa: Status 404 returned error can't find the container with id bb26ef407df8d3e2134d957f0e68096888a8aacbc1f3333a5f1c44129a430afa Nov 25 10:00:01 crc kubenswrapper[4769]: I1125 10:00:01.653583 4769 generic.go:334] "Generic (PLEG): container finished" podID="3ebbb140-bc0e-47b2-9d3e-b8db0f609729" containerID="229f9e06c806c3ae721029fd48a755938572a4b374f11c6269067d347ee130c2" exitCode=0 Nov 25 10:00:01 crc kubenswrapper[4769]: I1125 10:00:01.653818 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-9ntlv" event={"ID":"3ebbb140-bc0e-47b2-9d3e-b8db0f609729","Type":"ContainerDied","Data":"229f9e06c806c3ae721029fd48a755938572a4b374f11c6269067d347ee130c2"} Nov 25 10:00:01 crc kubenswrapper[4769]: I1125 10:00:01.656065 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-9ntlv" event={"ID":"3ebbb140-bc0e-47b2-9d3e-b8db0f609729","Type":"ContainerStarted","Data":"bb26ef407df8d3e2134d957f0e68096888a8aacbc1f3333a5f1c44129a430afa"} Nov 25 10:00:03 crc kubenswrapper[4769]: I1125 10:00:03.074026 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-9ntlv" Nov 25 10:00:03 crc kubenswrapper[4769]: I1125 10:00:03.197790 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3ebbb140-bc0e-47b2-9d3e-b8db0f609729-config-volume\") pod \"3ebbb140-bc0e-47b2-9d3e-b8db0f609729\" (UID: \"3ebbb140-bc0e-47b2-9d3e-b8db0f609729\") " Nov 25 10:00:03 crc kubenswrapper[4769]: I1125 10:00:03.197977 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bhvpq\" (UniqueName: \"kubernetes.io/projected/3ebbb140-bc0e-47b2-9d3e-b8db0f609729-kube-api-access-bhvpq\") pod \"3ebbb140-bc0e-47b2-9d3e-b8db0f609729\" (UID: \"3ebbb140-bc0e-47b2-9d3e-b8db0f609729\") " Nov 25 10:00:03 crc kubenswrapper[4769]: I1125 10:00:03.198025 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3ebbb140-bc0e-47b2-9d3e-b8db0f609729-secret-volume\") pod \"3ebbb140-bc0e-47b2-9d3e-b8db0f609729\" (UID: \"3ebbb140-bc0e-47b2-9d3e-b8db0f609729\") " Nov 25 10:00:03 crc kubenswrapper[4769]: I1125 10:00:03.198657 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3ebbb140-bc0e-47b2-9d3e-b8db0f609729-config-volume" (OuterVolumeSpecName: "config-volume") pod "3ebbb140-bc0e-47b2-9d3e-b8db0f609729" (UID: "3ebbb140-bc0e-47b2-9d3e-b8db0f609729"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:00:03 crc kubenswrapper[4769]: I1125 10:00:03.204773 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ebbb140-bc0e-47b2-9d3e-b8db0f609729-kube-api-access-bhvpq" (OuterVolumeSpecName: "kube-api-access-bhvpq") pod "3ebbb140-bc0e-47b2-9d3e-b8db0f609729" (UID: "3ebbb140-bc0e-47b2-9d3e-b8db0f609729"). InnerVolumeSpecName "kube-api-access-bhvpq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:00:03 crc kubenswrapper[4769]: I1125 10:00:03.205761 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ebbb140-bc0e-47b2-9d3e-b8db0f609729-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "3ebbb140-bc0e-47b2-9d3e-b8db0f609729" (UID: "3ebbb140-bc0e-47b2-9d3e-b8db0f609729"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:00:03 crc kubenswrapper[4769]: I1125 10:00:03.300535 4769 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3ebbb140-bc0e-47b2-9d3e-b8db0f609729-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:03 crc kubenswrapper[4769]: I1125 10:00:03.300577 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bhvpq\" (UniqueName: \"kubernetes.io/projected/3ebbb140-bc0e-47b2-9d3e-b8db0f609729-kube-api-access-bhvpq\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:03 crc kubenswrapper[4769]: I1125 10:00:03.300595 4769 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3ebbb140-bc0e-47b2-9d3e-b8db0f609729-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:03 crc kubenswrapper[4769]: I1125 10:00:03.632309 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-zljjp" Nov 25 10:00:03 crc kubenswrapper[4769]: I1125 10:00:03.708618 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-9ntlv" event={"ID":"3ebbb140-bc0e-47b2-9d3e-b8db0f609729","Type":"ContainerDied","Data":"bb26ef407df8d3e2134d957f0e68096888a8aacbc1f3333a5f1c44129a430afa"} Nov 25 10:00:03 crc kubenswrapper[4769]: I1125 10:00:03.708687 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bb26ef407df8d3e2134d957f0e68096888a8aacbc1f3333a5f1c44129a430afa" Nov 25 10:00:03 crc kubenswrapper[4769]: I1125 10:00:03.708801 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-9ntlv" Nov 25 10:00:19 crc kubenswrapper[4769]: I1125 10:00:19.709167 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-78789f5b5d-sxnbq" podUID="62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812" containerName="console" containerID="cri-o://3b2b57e5b9fb9a88e4081fb58f7886a7c02655a9c722c3341e810984cd8955c7" gracePeriod=15 Nov 25 10:00:19 crc kubenswrapper[4769]: I1125 10:00:19.898086 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-78789f5b5d-sxnbq_62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812/console/0.log" Nov 25 10:00:19 crc kubenswrapper[4769]: I1125 10:00:19.898417 4769 generic.go:334] "Generic (PLEG): container finished" podID="62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812" containerID="3b2b57e5b9fb9a88e4081fb58f7886a7c02655a9c722c3341e810984cd8955c7" exitCode=2 Nov 25 10:00:19 crc kubenswrapper[4769]: I1125 10:00:19.898472 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-78789f5b5d-sxnbq" event={"ID":"62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812","Type":"ContainerDied","Data":"3b2b57e5b9fb9a88e4081fb58f7886a7c02655a9c722c3341e810984cd8955c7"} Nov 25 10:00:20 crc kubenswrapper[4769]: I1125 10:00:20.193314 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-78789f5b5d-sxnbq_62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812/console/0.log" Nov 25 10:00:20 crc kubenswrapper[4769]: I1125 10:00:20.193381 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-78789f5b5d-sxnbq" Nov 25 10:00:20 crc kubenswrapper[4769]: I1125 10:00:20.313032 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812-console-config\") pod \"62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812\" (UID: \"62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812\") " Nov 25 10:00:20 crc kubenswrapper[4769]: I1125 10:00:20.313397 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812-console-oauth-config\") pod \"62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812\" (UID: \"62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812\") " Nov 25 10:00:20 crc kubenswrapper[4769]: I1125 10:00:20.313620 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q8r4r\" (UniqueName: \"kubernetes.io/projected/62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812-kube-api-access-q8r4r\") pod \"62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812\" (UID: \"62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812\") " Nov 25 10:00:20 crc kubenswrapper[4769]: I1125 10:00:20.313671 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812-oauth-serving-cert\") pod \"62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812\" (UID: \"62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812\") " Nov 25 10:00:20 crc kubenswrapper[4769]: I1125 10:00:20.313733 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812-trusted-ca-bundle\") pod \"62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812\" (UID: \"62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812\") " Nov 25 10:00:20 crc kubenswrapper[4769]: I1125 10:00:20.313775 4769 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812-service-ca\") pod \"62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812\" (UID: \"62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812\") " Nov 25 10:00:20 crc kubenswrapper[4769]: I1125 10:00:20.313846 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812-console-serving-cert\") pod \"62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812\" (UID: \"62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812\") " Nov 25 10:00:20 crc kubenswrapper[4769]: I1125 10:00:20.315256 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812" (UID: "62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:00:20 crc kubenswrapper[4769]: I1125 10:00:20.315289 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812-console-config" (OuterVolumeSpecName: "console-config") pod "62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812" (UID: "62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:00:20 crc kubenswrapper[4769]: I1125 10:00:20.315864 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812" (UID: "62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:00:20 crc kubenswrapper[4769]: I1125 10:00:20.315943 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812-service-ca" (OuterVolumeSpecName: "service-ca") pod "62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812" (UID: "62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:00:20 crc kubenswrapper[4769]: I1125 10:00:20.317831 4769 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812-console-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:20 crc kubenswrapper[4769]: I1125 10:00:20.317856 4769 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:20 crc kubenswrapper[4769]: I1125 10:00:20.317865 4769 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:20 crc kubenswrapper[4769]: I1125 10:00:20.317874 4769 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:20 crc kubenswrapper[4769]: I1125 10:00:20.320032 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812" (UID: "62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:00:20 crc kubenswrapper[4769]: I1125 10:00:20.321402 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812" (UID: "62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:00:20 crc kubenswrapper[4769]: I1125 10:00:20.325366 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812-kube-api-access-q8r4r" (OuterVolumeSpecName: "kube-api-access-q8r4r") pod "62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812" (UID: "62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812"). InnerVolumeSpecName "kube-api-access-q8r4r". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:00:20 crc kubenswrapper[4769]: I1125 10:00:20.427431 4769 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:20 crc kubenswrapper[4769]: I1125 10:00:20.427466 4769 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:20 crc kubenswrapper[4769]: I1125 10:00:20.427477 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q8r4r\" (UniqueName: \"kubernetes.io/projected/62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812-kube-api-access-q8r4r\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:20 crc kubenswrapper[4769]: I1125 10:00:20.911858 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-78789f5b5d-sxnbq_62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812/console/0.log" Nov 25 10:00:20 crc kubenswrapper[4769]: I1125 10:00:20.911951 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-78789f5b5d-sxnbq" event={"ID":"62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812","Type":"ContainerDied","Data":"e75b5ad37959e4586a1be4258b76a969d83bcb4056dcf94902b5105193c8a356"} Nov 25 10:00:20 crc kubenswrapper[4769]: I1125 10:00:20.912043 4769 scope.go:117] "RemoveContainer" containerID="3b2b57e5b9fb9a88e4081fb58f7886a7c02655a9c722c3341e810984cd8955c7" Nov 25 10:00:20 crc kubenswrapper[4769]: I1125 10:00:20.912076 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-78789f5b5d-sxnbq" Nov 25 10:00:20 crc kubenswrapper[4769]: I1125 10:00:20.955830 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-78789f5b5d-sxnbq"] Nov 25 10:00:20 crc kubenswrapper[4769]: I1125 10:00:20.961813 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-78789f5b5d-sxnbq"] Nov 25 10:00:22 crc kubenswrapper[4769]: I1125 10:00:22.249089 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812" path="/var/lib/kubelet/pods/62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812/volumes" Nov 25 10:00:22 crc kubenswrapper[4769]: I1125 10:00:22.290905 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:00:22 crc kubenswrapper[4769]: I1125 10:00:22.291004 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:00:22 crc kubenswrapper[4769]: I1125 10:00:22.291071 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" Nov 25 10:00:22 crc kubenswrapper[4769]: I1125 10:00:22.291956 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" 
containerStatusID={"Type":"cri-o","ID":"890cbbbb644d2d20db67bdcbf036dd9908902406f9cea9d984225deeb8a33fbe"} pod="openshift-machine-config-operator/machine-config-daemon-98mzt" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 10:00:22 crc kubenswrapper[4769]: I1125 10:00:22.292038 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" containerID="cri-o://890cbbbb644d2d20db67bdcbf036dd9908902406f9cea9d984225deeb8a33fbe" gracePeriod=600 Nov 25 10:00:22 crc kubenswrapper[4769]: I1125 10:00:22.936170 4769 generic.go:334] "Generic (PLEG): container finished" podID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerID="890cbbbb644d2d20db67bdcbf036dd9908902406f9cea9d984225deeb8a33fbe" exitCode=0 Nov 25 10:00:22 crc kubenswrapper[4769]: I1125 10:00:22.936224 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" event={"ID":"d58c71b5-5dc4-45c1-9b58-9740a35d2256","Type":"ContainerDied","Data":"890cbbbb644d2d20db67bdcbf036dd9908902406f9cea9d984225deeb8a33fbe"} Nov 25 10:00:22 crc kubenswrapper[4769]: I1125 10:00:22.936621 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" event={"ID":"d58c71b5-5dc4-45c1-9b58-9740a35d2256","Type":"ContainerStarted","Data":"cb83aff5daeabf3c346d649af9f679bac8007d12e211a6552aed7e17452f752f"} Nov 25 10:00:22 crc kubenswrapper[4769]: I1125 10:00:22.936655 4769 scope.go:117] "RemoveContainer" containerID="2b789aba899603f1a14199b861fe977351ed8d7e8ef00a45b850160445e4d04d" Nov 25 10:00:23 crc kubenswrapper[4769]: I1125 10:00:23.298813 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6n9dwd"] Nov 25 10:00:23 crc kubenswrapper[4769]: E1125 10:00:23.299503 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812" containerName="console" Nov 25 10:00:23 crc kubenswrapper[4769]: I1125 10:00:23.299516 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812" containerName="console" Nov 25 10:00:23 crc kubenswrapper[4769]: E1125 10:00:23.299537 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ebbb140-bc0e-47b2-9d3e-b8db0f609729" containerName="collect-profiles" Nov 25 10:00:23 crc kubenswrapper[4769]: I1125 10:00:23.299544 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ebbb140-bc0e-47b2-9d3e-b8db0f609729" containerName="collect-profiles" Nov 25 10:00:23 crc kubenswrapper[4769]: I1125 10:00:23.299703 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ebbb140-bc0e-47b2-9d3e-b8db0f609729" containerName="collect-profiles" Nov 25 10:00:23 crc kubenswrapper[4769]: I1125 10:00:23.299717 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="62fe8d7d-d9ae-4f2e-9ce1-e849f0a5f812" containerName="console" Nov 25 10:00:23 crc kubenswrapper[4769]: I1125 10:00:23.300941 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6n9dwd" Nov 25 10:00:23 crc kubenswrapper[4769]: I1125 10:00:23.316736 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 25 10:00:23 crc kubenswrapper[4769]: I1125 10:00:23.317337 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6n9dwd"] Nov 25 10:00:23 crc kubenswrapper[4769]: I1125 10:00:23.389179 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/aec3e6b9-d42c-49a5-bbd2-e99a688bb029-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6n9dwd\" (UID: \"aec3e6b9-d42c-49a5-bbd2-e99a688bb029\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6n9dwd" Nov 25 10:00:23 crc kubenswrapper[4769]: I1125 10:00:23.389255 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/aec3e6b9-d42c-49a5-bbd2-e99a688bb029-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6n9dwd\" (UID: \"aec3e6b9-d42c-49a5-bbd2-e99a688bb029\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6n9dwd" Nov 25 10:00:23 crc kubenswrapper[4769]: I1125 10:00:23.389330 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cvbsl\" (UniqueName: \"kubernetes.io/projected/aec3e6b9-d42c-49a5-bbd2-e99a688bb029-kube-api-access-cvbsl\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6n9dwd\" (UID: \"aec3e6b9-d42c-49a5-bbd2-e99a688bb029\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6n9dwd" Nov 25 10:00:23 crc kubenswrapper[4769]: I1125 10:00:23.490890 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/aec3e6b9-d42c-49a5-bbd2-e99a688bb029-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6n9dwd\" (UID: \"aec3e6b9-d42c-49a5-bbd2-e99a688bb029\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6n9dwd" Nov 25 10:00:23 crc kubenswrapper[4769]: I1125 10:00:23.490982 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/aec3e6b9-d42c-49a5-bbd2-e99a688bb029-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6n9dwd\" (UID: \"aec3e6b9-d42c-49a5-bbd2-e99a688bb029\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6n9dwd" Nov 25 10:00:23 crc kubenswrapper[4769]: I1125 10:00:23.491063 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cvbsl\" (UniqueName: \"kubernetes.io/projected/aec3e6b9-d42c-49a5-bbd2-e99a688bb029-kube-api-access-cvbsl\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6n9dwd\" (UID: \"aec3e6b9-d42c-49a5-bbd2-e99a688bb029\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6n9dwd" Nov 25 10:00:23 crc kubenswrapper[4769]: I1125 10:00:23.491731 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/aec3e6b9-d42c-49a5-bbd2-e99a688bb029-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6n9dwd\" (UID: \"aec3e6b9-d42c-49a5-bbd2-e99a688bb029\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6n9dwd" Nov 25 10:00:23 crc kubenswrapper[4769]: I1125 10:00:23.491916 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/aec3e6b9-d42c-49a5-bbd2-e99a688bb029-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6n9dwd\" (UID: \"aec3e6b9-d42c-49a5-bbd2-e99a688bb029\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6n9dwd" Nov 25 10:00:23 crc kubenswrapper[4769]: I1125 10:00:23.522087 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cvbsl\" (UniqueName: \"kubernetes.io/projected/aec3e6b9-d42c-49a5-bbd2-e99a688bb029-kube-api-access-cvbsl\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6n9dwd\" (UID: \"aec3e6b9-d42c-49a5-bbd2-e99a688bb029\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6n9dwd" Nov 25 10:00:23 crc kubenswrapper[4769]: I1125 10:00:23.619686 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6n9dwd" Nov 25 10:00:24 crc kubenswrapper[4769]: I1125 10:00:24.132250 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6n9dwd"] Nov 25 10:00:24 crc kubenswrapper[4769]: I1125 10:00:24.962801 4769 generic.go:334] "Generic (PLEG): container finished" podID="aec3e6b9-d42c-49a5-bbd2-e99a688bb029" containerID="66b71507b05cb7d3716d7187c830e06cb45052caeca9919a6ce48079559ec979" exitCode=0 Nov 25 10:00:24 crc kubenswrapper[4769]: I1125 10:00:24.963034 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6n9dwd" event={"ID":"aec3e6b9-d42c-49a5-bbd2-e99a688bb029","Type":"ContainerDied","Data":"66b71507b05cb7d3716d7187c830e06cb45052caeca9919a6ce48079559ec979"} Nov 25 10:00:24 crc kubenswrapper[4769]: I1125 10:00:24.966216 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6n9dwd" event={"ID":"aec3e6b9-d42c-49a5-bbd2-e99a688bb029","Type":"ContainerStarted","Data":"e879ab3d59b3cbfe3506471a5f1ffac3f4af0a677fce075b5f2b8b233ad3a5ac"} Nov 25 10:00:26 crc kubenswrapper[4769]: I1125 10:00:26.992183 4769 generic.go:334] "Generic (PLEG): container finished" podID="aec3e6b9-d42c-49a5-bbd2-e99a688bb029" containerID="7bb157601f3dbe474f76bb809b6b104b73a9de7cc9eee9f11a2054f5a528c2fb" exitCode=0 Nov 25 10:00:26 crc kubenswrapper[4769]: I1125 10:00:26.992244 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6n9dwd" event={"ID":"aec3e6b9-d42c-49a5-bbd2-e99a688bb029","Type":"ContainerDied","Data":"7bb157601f3dbe474f76bb809b6b104b73a9de7cc9eee9f11a2054f5a528c2fb"} Nov 25 10:00:28 crc kubenswrapper[4769]: I1125 10:00:28.015281 4769 generic.go:334] "Generic (PLEG): container finished" podID="aec3e6b9-d42c-49a5-bbd2-e99a688bb029" containerID="b1e9d4e13b1066da491417cadacd3a6e68d42ef41f64feeeec6fe8b6a100ddea" exitCode=0 Nov 25 10:00:28 crc kubenswrapper[4769]: I1125 
10:00:28.015645 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6n9dwd" event={"ID":"aec3e6b9-d42c-49a5-bbd2-e99a688bb029","Type":"ContainerDied","Data":"b1e9d4e13b1066da491417cadacd3a6e68d42ef41f64feeeec6fe8b6a100ddea"} Nov 25 10:00:29 crc kubenswrapper[4769]: I1125 10:00:29.407034 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6n9dwd" Nov 25 10:00:29 crc kubenswrapper[4769]: I1125 10:00:29.553582 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/aec3e6b9-d42c-49a5-bbd2-e99a688bb029-bundle\") pod \"aec3e6b9-d42c-49a5-bbd2-e99a688bb029\" (UID: \"aec3e6b9-d42c-49a5-bbd2-e99a688bb029\") " Nov 25 10:00:29 crc kubenswrapper[4769]: I1125 10:00:29.553863 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cvbsl\" (UniqueName: \"kubernetes.io/projected/aec3e6b9-d42c-49a5-bbd2-e99a688bb029-kube-api-access-cvbsl\") pod \"aec3e6b9-d42c-49a5-bbd2-e99a688bb029\" (UID: \"aec3e6b9-d42c-49a5-bbd2-e99a688bb029\") " Nov 25 10:00:29 crc kubenswrapper[4769]: I1125 10:00:29.553908 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/aec3e6b9-d42c-49a5-bbd2-e99a688bb029-util\") pod \"aec3e6b9-d42c-49a5-bbd2-e99a688bb029\" (UID: \"aec3e6b9-d42c-49a5-bbd2-e99a688bb029\") " Nov 25 10:00:29 crc kubenswrapper[4769]: I1125 10:00:29.556105 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aec3e6b9-d42c-49a5-bbd2-e99a688bb029-bundle" (OuterVolumeSpecName: "bundle") pod "aec3e6b9-d42c-49a5-bbd2-e99a688bb029" (UID: "aec3e6b9-d42c-49a5-bbd2-e99a688bb029"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:00:29 crc kubenswrapper[4769]: I1125 10:00:29.565277 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aec3e6b9-d42c-49a5-bbd2-e99a688bb029-kube-api-access-cvbsl" (OuterVolumeSpecName: "kube-api-access-cvbsl") pod "aec3e6b9-d42c-49a5-bbd2-e99a688bb029" (UID: "aec3e6b9-d42c-49a5-bbd2-e99a688bb029"). InnerVolumeSpecName "kube-api-access-cvbsl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:00:29 crc kubenswrapper[4769]: I1125 10:00:29.568991 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aec3e6b9-d42c-49a5-bbd2-e99a688bb029-util" (OuterVolumeSpecName: "util") pod "aec3e6b9-d42c-49a5-bbd2-e99a688bb029" (UID: "aec3e6b9-d42c-49a5-bbd2-e99a688bb029"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:00:29 crc kubenswrapper[4769]: I1125 10:00:29.656422 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cvbsl\" (UniqueName: \"kubernetes.io/projected/aec3e6b9-d42c-49a5-bbd2-e99a688bb029-kube-api-access-cvbsl\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:29 crc kubenswrapper[4769]: I1125 10:00:29.656473 4769 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/aec3e6b9-d42c-49a5-bbd2-e99a688bb029-util\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:29 crc kubenswrapper[4769]: I1125 10:00:29.656485 4769 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/aec3e6b9-d42c-49a5-bbd2-e99a688bb029-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:30 crc kubenswrapper[4769]: I1125 10:00:30.042052 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6n9dwd" event={"ID":"aec3e6b9-d42c-49a5-bbd2-e99a688bb029","Type":"ContainerDied","Data":"e879ab3d59b3cbfe3506471a5f1ffac3f4af0a677fce075b5f2b8b233ad3a5ac"} Nov 25 10:00:30 crc kubenswrapper[4769]: I1125 10:00:30.042114 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e879ab3d59b3cbfe3506471a5f1ffac3f4af0a677fce075b5f2b8b233ad3a5ac" Nov 25 10:00:30 crc kubenswrapper[4769]: I1125 10:00:30.042299 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6n9dwd" Nov 25 10:00:37 crc kubenswrapper[4769]: I1125 10:00:37.993665 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-64569bb78d-pzqdq"] Nov 25 10:00:37 crc kubenswrapper[4769]: E1125 10:00:37.994905 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aec3e6b9-d42c-49a5-bbd2-e99a688bb029" containerName="extract" Nov 25 10:00:37 crc kubenswrapper[4769]: I1125 10:00:37.994921 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="aec3e6b9-d42c-49a5-bbd2-e99a688bb029" containerName="extract" Nov 25 10:00:37 crc kubenswrapper[4769]: E1125 10:00:37.994933 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aec3e6b9-d42c-49a5-bbd2-e99a688bb029" containerName="util" Nov 25 10:00:37 crc kubenswrapper[4769]: I1125 10:00:37.994939 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="aec3e6b9-d42c-49a5-bbd2-e99a688bb029" containerName="util" Nov 25 10:00:37 crc kubenswrapper[4769]: E1125 10:00:37.994947 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aec3e6b9-d42c-49a5-bbd2-e99a688bb029" containerName="pull" Nov 25 10:00:37 crc kubenswrapper[4769]: I1125 10:00:37.994953 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="aec3e6b9-d42c-49a5-bbd2-e99a688bb029" containerName="pull" Nov 25 10:00:37 crc kubenswrapper[4769]: I1125 10:00:37.995107 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="aec3e6b9-d42c-49a5-bbd2-e99a688bb029" containerName="extract" Nov 25 10:00:37 crc kubenswrapper[4769]: I1125 10:00:37.995707 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-64569bb78d-pzqdq" Nov 25 10:00:38 crc kubenswrapper[4769]: I1125 10:00:38.001814 4769 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Nov 25 10:00:38 crc kubenswrapper[4769]: I1125 10:00:38.002107 4769 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-dx6j5" Nov 25 10:00:38 crc kubenswrapper[4769]: I1125 10:00:38.002278 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Nov 25 10:00:38 crc kubenswrapper[4769]: I1125 10:00:38.002392 4769 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Nov 25 10:00:38 crc kubenswrapper[4769]: I1125 10:00:38.002493 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Nov 25 10:00:38 crc kubenswrapper[4769]: I1125 10:00:38.011327 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-64569bb78d-pzqdq"] Nov 25 10:00:38 crc kubenswrapper[4769]: I1125 10:00:38.119344 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ld4d7\" (UniqueName: \"kubernetes.io/projected/9441dbc7-716c-413e-b0ea-bf1ef05b1608-kube-api-access-ld4d7\") pod \"metallb-operator-controller-manager-64569bb78d-pzqdq\" (UID: \"9441dbc7-716c-413e-b0ea-bf1ef05b1608\") " pod="metallb-system/metallb-operator-controller-manager-64569bb78d-pzqdq" Nov 25 10:00:38 crc kubenswrapper[4769]: I1125 10:00:38.119402 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/9441dbc7-716c-413e-b0ea-bf1ef05b1608-webhook-cert\") pod \"metallb-operator-controller-manager-64569bb78d-pzqdq\" (UID: \"9441dbc7-716c-413e-b0ea-bf1ef05b1608\") " pod="metallb-system/metallb-operator-controller-manager-64569bb78d-pzqdq" Nov 25 10:00:38 crc kubenswrapper[4769]: I1125 10:00:38.119896 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/9441dbc7-716c-413e-b0ea-bf1ef05b1608-apiservice-cert\") pod \"metallb-operator-controller-manager-64569bb78d-pzqdq\" (UID: \"9441dbc7-716c-413e-b0ea-bf1ef05b1608\") " pod="metallb-system/metallb-operator-controller-manager-64569bb78d-pzqdq" Nov 25 10:00:38 crc kubenswrapper[4769]: I1125 10:00:38.222157 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/9441dbc7-716c-413e-b0ea-bf1ef05b1608-apiservice-cert\") pod \"metallb-operator-controller-manager-64569bb78d-pzqdq\" (UID: \"9441dbc7-716c-413e-b0ea-bf1ef05b1608\") " pod="metallb-system/metallb-operator-controller-manager-64569bb78d-pzqdq" Nov 25 10:00:38 crc kubenswrapper[4769]: I1125 10:00:38.222237 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ld4d7\" (UniqueName: \"kubernetes.io/projected/9441dbc7-716c-413e-b0ea-bf1ef05b1608-kube-api-access-ld4d7\") pod \"metallb-operator-controller-manager-64569bb78d-pzqdq\" (UID: \"9441dbc7-716c-413e-b0ea-bf1ef05b1608\") " pod="metallb-system/metallb-operator-controller-manager-64569bb78d-pzqdq" Nov 25 10:00:38 crc kubenswrapper[4769]: I1125 10:00:38.222262 
4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/9441dbc7-716c-413e-b0ea-bf1ef05b1608-webhook-cert\") pod \"metallb-operator-controller-manager-64569bb78d-pzqdq\" (UID: \"9441dbc7-716c-413e-b0ea-bf1ef05b1608\") " pod="metallb-system/metallb-operator-controller-manager-64569bb78d-pzqdq" Nov 25 10:00:38 crc kubenswrapper[4769]: I1125 10:00:38.230886 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/9441dbc7-716c-413e-b0ea-bf1ef05b1608-apiservice-cert\") pod \"metallb-operator-controller-manager-64569bb78d-pzqdq\" (UID: \"9441dbc7-716c-413e-b0ea-bf1ef05b1608\") " pod="metallb-system/metallb-operator-controller-manager-64569bb78d-pzqdq" Nov 25 10:00:38 crc kubenswrapper[4769]: I1125 10:00:38.237818 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/9441dbc7-716c-413e-b0ea-bf1ef05b1608-webhook-cert\") pod \"metallb-operator-controller-manager-64569bb78d-pzqdq\" (UID: \"9441dbc7-716c-413e-b0ea-bf1ef05b1608\") " pod="metallb-system/metallb-operator-controller-manager-64569bb78d-pzqdq" Nov 25 10:00:38 crc kubenswrapper[4769]: I1125 10:00:38.291668 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ld4d7\" (UniqueName: \"kubernetes.io/projected/9441dbc7-716c-413e-b0ea-bf1ef05b1608-kube-api-access-ld4d7\") pod \"metallb-operator-controller-manager-64569bb78d-pzqdq\" (UID: \"9441dbc7-716c-413e-b0ea-bf1ef05b1608\") " pod="metallb-system/metallb-operator-controller-manager-64569bb78d-pzqdq" Nov 25 10:00:38 crc kubenswrapper[4769]: I1125 10:00:38.315778 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-55c9569f76-llhqr"] Nov 25 10:00:38 crc kubenswrapper[4769]: I1125 10:00:38.317230 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-55c9569f76-llhqr" Nov 25 10:00:38 crc kubenswrapper[4769]: I1125 10:00:38.320782 4769 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Nov 25 10:00:38 crc kubenswrapper[4769]: I1125 10:00:38.321166 4769 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 25 10:00:38 crc kubenswrapper[4769]: I1125 10:00:38.321354 4769 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-4ctxp" Nov 25 10:00:38 crc kubenswrapper[4769]: I1125 10:00:38.323598 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-55c9569f76-llhqr"] Nov 25 10:00:38 crc kubenswrapper[4769]: I1125 10:00:38.326919 4769 util.go:30] "No sandbox for pod can be found. 
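
The SyncLoop ADD / SyncLoop UPDATE entries with source="api" reflect the kubelet's watch on the API server: pod objects arrive, caches for the referenced Secrets and ConfigMaps are populated, and volume setup begins. A small client-go sketch that observes the same add/update stream, assuming in-cluster credentials and the metallb-system namespace from the log:

```go
package main

import (
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)

	// Watch the same namespace the entries above are about.
	factory := informers.NewSharedInformerFactoryWithOptions(
		cs, 30*time.Second, informers.WithNamespace("metallb-system"))

	factory.Core().V1().Pods().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			p := obj.(*corev1.Pod)
			fmt.Println("ADD", p.Namespace+"/"+p.Name) // kubelet logs this as SyncLoop ADD
		},
		UpdateFunc: func(_, obj interface{}) {
			p := obj.(*corev1.Pod)
			fmt.Println("UPDATE", p.Namespace+"/"+p.Name) // SyncLoop UPDATE
		},
	})

	stop := make(chan struct{})
	factory.Start(stop)
	factory.WaitForCacheSync(stop)
	select {} // block forever
}
```
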
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-64569bb78d-pzqdq" Nov 25 10:00:38 crc kubenswrapper[4769]: I1125 10:00:38.427449 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-55lxs\" (UniqueName: \"kubernetes.io/projected/395976a4-ddee-425d-994f-c913076f1710-kube-api-access-55lxs\") pod \"metallb-operator-webhook-server-55c9569f76-llhqr\" (UID: \"395976a4-ddee-425d-994f-c913076f1710\") " pod="metallb-system/metallb-operator-webhook-server-55c9569f76-llhqr" Nov 25 10:00:38 crc kubenswrapper[4769]: I1125 10:00:38.427881 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/395976a4-ddee-425d-994f-c913076f1710-webhook-cert\") pod \"metallb-operator-webhook-server-55c9569f76-llhqr\" (UID: \"395976a4-ddee-425d-994f-c913076f1710\") " pod="metallb-system/metallb-operator-webhook-server-55c9569f76-llhqr" Nov 25 10:00:38 crc kubenswrapper[4769]: I1125 10:00:38.427903 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/395976a4-ddee-425d-994f-c913076f1710-apiservice-cert\") pod \"metallb-operator-webhook-server-55c9569f76-llhqr\" (UID: \"395976a4-ddee-425d-994f-c913076f1710\") " pod="metallb-system/metallb-operator-webhook-server-55c9569f76-llhqr" Nov 25 10:00:38 crc kubenswrapper[4769]: I1125 10:00:38.528957 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/395976a4-ddee-425d-994f-c913076f1710-webhook-cert\") pod \"metallb-operator-webhook-server-55c9569f76-llhqr\" (UID: \"395976a4-ddee-425d-994f-c913076f1710\") " pod="metallb-system/metallb-operator-webhook-server-55c9569f76-llhqr" Nov 25 10:00:38 crc kubenswrapper[4769]: I1125 10:00:38.529185 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/395976a4-ddee-425d-994f-c913076f1710-apiservice-cert\") pod \"metallb-operator-webhook-server-55c9569f76-llhqr\" (UID: \"395976a4-ddee-425d-994f-c913076f1710\") " pod="metallb-system/metallb-operator-webhook-server-55c9569f76-llhqr" Nov 25 10:00:38 crc kubenswrapper[4769]: I1125 10:00:38.529298 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-55lxs\" (UniqueName: \"kubernetes.io/projected/395976a4-ddee-425d-994f-c913076f1710-kube-api-access-55lxs\") pod \"metallb-operator-webhook-server-55c9569f76-llhqr\" (UID: \"395976a4-ddee-425d-994f-c913076f1710\") " pod="metallb-system/metallb-operator-webhook-server-55c9569f76-llhqr" Nov 25 10:00:38 crc kubenswrapper[4769]: I1125 10:00:38.535721 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/395976a4-ddee-425d-994f-c913076f1710-apiservice-cert\") pod \"metallb-operator-webhook-server-55c9569f76-llhqr\" (UID: \"395976a4-ddee-425d-994f-c913076f1710\") " pod="metallb-system/metallb-operator-webhook-server-55c9569f76-llhqr" Nov 25 10:00:38 crc kubenswrapper[4769]: I1125 10:00:38.536221 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/395976a4-ddee-425d-994f-c913076f1710-webhook-cert\") pod \"metallb-operator-webhook-server-55c9569f76-llhqr\" (UID: \"395976a4-ddee-425d-994f-c913076f1710\") " 
pod="metallb-system/metallb-operator-webhook-server-55c9569f76-llhqr" Nov 25 10:00:38 crc kubenswrapper[4769]: I1125 10:00:38.547214 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-55lxs\" (UniqueName: \"kubernetes.io/projected/395976a4-ddee-425d-994f-c913076f1710-kube-api-access-55lxs\") pod \"metallb-operator-webhook-server-55c9569f76-llhqr\" (UID: \"395976a4-ddee-425d-994f-c913076f1710\") " pod="metallb-system/metallb-operator-webhook-server-55c9569f76-llhqr" Nov 25 10:00:38 crc kubenswrapper[4769]: I1125 10:00:38.640772 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-55c9569f76-llhqr" Nov 25 10:00:38 crc kubenswrapper[4769]: I1125 10:00:38.816166 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-64569bb78d-pzqdq"] Nov 25 10:00:39 crc kubenswrapper[4769]: W1125 10:00:39.102183 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod395976a4_ddee_425d_994f_c913076f1710.slice/crio-7aa29afdf00f008e3acc191bd5a70440f60f8966e191b894bcee6a4923f58a8e WatchSource:0}: Error finding container 7aa29afdf00f008e3acc191bd5a70440f60f8966e191b894bcee6a4923f58a8e: Status 404 returned error can't find the container with id 7aa29afdf00f008e3acc191bd5a70440f60f8966e191b894bcee6a4923f58a8e Nov 25 10:00:39 crc kubenswrapper[4769]: I1125 10:00:39.103009 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-55c9569f76-llhqr"] Nov 25 10:00:39 crc kubenswrapper[4769]: I1125 10:00:39.113748 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-64569bb78d-pzqdq" event={"ID":"9441dbc7-716c-413e-b0ea-bf1ef05b1608","Type":"ContainerStarted","Data":"e8df4a4a4c8b244c57394cb05d5d1079ee64d571f868185ac6e7065140b4f9d6"} Nov 25 10:00:39 crc kubenswrapper[4769]: I1125 10:00:39.114634 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-55c9569f76-llhqr" event={"ID":"395976a4-ddee-425d-994f-c913076f1710","Type":"ContainerStarted","Data":"7aa29afdf00f008e3acc191bd5a70440f60f8966e191b894bcee6a4923f58a8e"} Nov 25 10:00:46 crc kubenswrapper[4769]: I1125 10:00:46.191384 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-55c9569f76-llhqr" event={"ID":"395976a4-ddee-425d-994f-c913076f1710","Type":"ContainerStarted","Data":"813ac10ea396a9eaa89fa1926ab9528b168dfbf318b29e70798c6f6f9ab01a1a"} Nov 25 10:00:46 crc kubenswrapper[4769]: I1125 10:00:46.191890 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-55c9569f76-llhqr" Nov 25 10:00:46 crc kubenswrapper[4769]: I1125 10:00:46.194338 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-64569bb78d-pzqdq" event={"ID":"9441dbc7-716c-413e-b0ea-bf1ef05b1608","Type":"ContainerStarted","Data":"4ab3af93f3df09f3bd9984b319746b513e6c0b970fbaaafbf6a6c79973534d0b"} Nov 25 10:00:46 crc kubenswrapper[4769]: I1125 10:00:46.194506 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-64569bb78d-pzqdq" Nov 25 10:00:46 crc kubenswrapper[4769]: I1125 10:00:46.223422 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="metallb-system/metallb-operator-webhook-server-55c9569f76-llhqr" podStartSLOduration=1.901747209 podStartE2EDuration="8.22339296s" podCreationTimestamp="2025-11-25 10:00:38 +0000 UTC" firstStartedPulling="2025-11-25 10:00:39.104937871 +0000 UTC m=+987.689910184" lastFinishedPulling="2025-11-25 10:00:45.426583622 +0000 UTC m=+994.011555935" observedRunningTime="2025-11-25 10:00:46.215392461 +0000 UTC m=+994.800364784" watchObservedRunningTime="2025-11-25 10:00:46.22339296 +0000 UTC m=+994.808365283" Nov 25 10:00:46 crc kubenswrapper[4769]: I1125 10:00:46.253699 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-64569bb78d-pzqdq" podStartSLOduration=2.68406716 podStartE2EDuration="9.25366942s" podCreationTimestamp="2025-11-25 10:00:37 +0000 UTC" firstStartedPulling="2025-11-25 10:00:38.838348821 +0000 UTC m=+987.423321144" lastFinishedPulling="2025-11-25 10:00:45.407951091 +0000 UTC m=+993.992923404" observedRunningTime="2025-11-25 10:00:46.246288088 +0000 UTC m=+994.831260401" watchObservedRunningTime="2025-11-25 10:00:46.25366942 +0000 UTC m=+994.838641753" Nov 25 10:00:58 crc kubenswrapper[4769]: I1125 10:00:58.647500 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-55c9569f76-llhqr" Nov 25 10:01:18 crc kubenswrapper[4769]: I1125 10:01:18.331196 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-64569bb78d-pzqdq" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.069649 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-zwpnn"] Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.074542 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-zwpnn" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.078048 4769 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-975vr" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.078300 4769 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.078404 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.078049 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-v89x9"] Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.079861 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-v89x9" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.082120 4769 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.090980 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-v89x9"] Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.119957 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/aa14b8e2-159e-4850-8816-14bc635838ac-metrics\") pod \"frr-k8s-zwpnn\" (UID: \"aa14b8e2-159e-4850-8816-14bc635838ac\") " pod="metallb-system/frr-k8s-zwpnn" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.120129 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/aa14b8e2-159e-4850-8816-14bc635838ac-metrics-certs\") pod \"frr-k8s-zwpnn\" (UID: \"aa14b8e2-159e-4850-8816-14bc635838ac\") " pod="metallb-system/frr-k8s-zwpnn" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.120186 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/aa14b8e2-159e-4850-8816-14bc635838ac-frr-sockets\") pod \"frr-k8s-zwpnn\" (UID: \"aa14b8e2-159e-4850-8816-14bc635838ac\") " pod="metallb-system/frr-k8s-zwpnn" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.120217 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8df830ab-ab7a-49cd-b7d4-72d44c99cc4f-cert\") pod \"frr-k8s-webhook-server-6998585d5-v89x9\" (UID: \"8df830ab-ab7a-49cd-b7d4-72d44c99cc4f\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-v89x9" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.120248 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8mk9x\" (UniqueName: \"kubernetes.io/projected/aa14b8e2-159e-4850-8816-14bc635838ac-kube-api-access-8mk9x\") pod \"frr-k8s-zwpnn\" (UID: \"aa14b8e2-159e-4850-8816-14bc635838ac\") " pod="metallb-system/frr-k8s-zwpnn" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.120297 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/aa14b8e2-159e-4850-8816-14bc635838ac-frr-conf\") pod \"frr-k8s-zwpnn\" (UID: \"aa14b8e2-159e-4850-8816-14bc635838ac\") " pod="metallb-system/frr-k8s-zwpnn" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.120353 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/aa14b8e2-159e-4850-8816-14bc635838ac-frr-startup\") pod \"frr-k8s-zwpnn\" (UID: \"aa14b8e2-159e-4850-8816-14bc635838ac\") " pod="metallb-system/frr-k8s-zwpnn" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.120387 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/aa14b8e2-159e-4850-8816-14bc635838ac-reloader\") pod \"frr-k8s-zwpnn\" (UID: \"aa14b8e2-159e-4850-8816-14bc635838ac\") " pod="metallb-system/frr-k8s-zwpnn" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.120433 
4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jg7z9\" (UniqueName: \"kubernetes.io/projected/8df830ab-ab7a-49cd-b7d4-72d44c99cc4f-kube-api-access-jg7z9\") pod \"frr-k8s-webhook-server-6998585d5-v89x9\" (UID: \"8df830ab-ab7a-49cd-b7d4-72d44c99cc4f\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-v89x9" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.187004 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-84t8t"] Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.188408 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-84t8t" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.195461 4769 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.195771 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.195888 4769 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.200461 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6c7b4b5f48-8k9g6"] Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.201846 4769 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-9bhp9" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.204918 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-8k9g6" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.208574 4769 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.221528 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/aa14b8e2-159e-4850-8816-14bc635838ac-frr-startup\") pod \"frr-k8s-zwpnn\" (UID: \"aa14b8e2-159e-4850-8816-14bc635838ac\") " pod="metallb-system/frr-k8s-zwpnn" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.222876 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ce9a407b-6a62-48d8-a15d-e08c1b09c3e3-metrics-certs\") pod \"speaker-84t8t\" (UID: \"ce9a407b-6a62-48d8-a15d-e08c1b09c3e3\") " pod="metallb-system/speaker-84t8t" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.223044 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/aa14b8e2-159e-4850-8816-14bc635838ac-reloader\") pod \"frr-k8s-zwpnn\" (UID: \"aa14b8e2-159e-4850-8816-14bc635838ac\") " pod="metallb-system/frr-k8s-zwpnn" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.223218 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jg7z9\" (UniqueName: \"kubernetes.io/projected/8df830ab-ab7a-49cd-b7d4-72d44c99cc4f-kube-api-access-jg7z9\") pod \"frr-k8s-webhook-server-6998585d5-v89x9\" (UID: \"8df830ab-ab7a-49cd-b7d4-72d44c99cc4f\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-v89x9" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.223347 4769 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/ce9a407b-6a62-48d8-a15d-e08c1b09c3e3-memberlist\") pod \"speaker-84t8t\" (UID: \"ce9a407b-6a62-48d8-a15d-e08c1b09c3e3\") " pod="metallb-system/speaker-84t8t" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.223452 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/aa14b8e2-159e-4850-8816-14bc635838ac-frr-startup\") pod \"frr-k8s-zwpnn\" (UID: \"aa14b8e2-159e-4850-8816-14bc635838ac\") " pod="metallb-system/frr-k8s-zwpnn" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.223462 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/aa14b8e2-159e-4850-8816-14bc635838ac-metrics\") pod \"frr-k8s-zwpnn\" (UID: \"aa14b8e2-159e-4850-8816-14bc635838ac\") " pod="metallb-system/frr-k8s-zwpnn" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.223561 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/aa14b8e2-159e-4850-8816-14bc635838ac-reloader\") pod \"frr-k8s-zwpnn\" (UID: \"aa14b8e2-159e-4850-8816-14bc635838ac\") " pod="metallb-system/frr-k8s-zwpnn" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.221658 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-8k9g6"] Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.223766 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/aa14b8e2-159e-4850-8816-14bc635838ac-metrics-certs\") pod \"frr-k8s-zwpnn\" (UID: \"aa14b8e2-159e-4850-8816-14bc635838ac\") " pod="metallb-system/frr-k8s-zwpnn" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.223818 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/ce9a407b-6a62-48d8-a15d-e08c1b09c3e3-metallb-excludel2\") pod \"speaker-84t8t\" (UID: \"ce9a407b-6a62-48d8-a15d-e08c1b09c3e3\") " pod="metallb-system/speaker-84t8t" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.223953 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/aa14b8e2-159e-4850-8816-14bc635838ac-frr-sockets\") pod \"frr-k8s-zwpnn\" (UID: \"aa14b8e2-159e-4850-8816-14bc635838ac\") " pod="metallb-system/frr-k8s-zwpnn" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.224004 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zsj82\" (UniqueName: \"kubernetes.io/projected/ce9a407b-6a62-48d8-a15d-e08c1b09c3e3-kube-api-access-zsj82\") pod \"speaker-84t8t\" (UID: \"ce9a407b-6a62-48d8-a15d-e08c1b09c3e3\") " pod="metallb-system/speaker-84t8t" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.224052 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8df830ab-ab7a-49cd-b7d4-72d44c99cc4f-cert\") pod \"frr-k8s-webhook-server-6998585d5-v89x9\" (UID: \"8df830ab-ab7a-49cd-b7d4-72d44c99cc4f\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-v89x9" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.224090 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8mk9x\" 
(UniqueName: \"kubernetes.io/projected/aa14b8e2-159e-4850-8816-14bc635838ac-kube-api-access-8mk9x\") pod \"frr-k8s-zwpnn\" (UID: \"aa14b8e2-159e-4850-8816-14bc635838ac\") " pod="metallb-system/frr-k8s-zwpnn" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.224202 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/aa14b8e2-159e-4850-8816-14bc635838ac-frr-conf\") pod \"frr-k8s-zwpnn\" (UID: \"aa14b8e2-159e-4850-8816-14bc635838ac\") " pod="metallb-system/frr-k8s-zwpnn" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.224397 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/aa14b8e2-159e-4850-8816-14bc635838ac-metrics\") pod \"frr-k8s-zwpnn\" (UID: \"aa14b8e2-159e-4850-8816-14bc635838ac\") " pod="metallb-system/frr-k8s-zwpnn" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.224650 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/aa14b8e2-159e-4850-8816-14bc635838ac-frr-conf\") pod \"frr-k8s-zwpnn\" (UID: \"aa14b8e2-159e-4850-8816-14bc635838ac\") " pod="metallb-system/frr-k8s-zwpnn" Nov 25 10:01:19 crc kubenswrapper[4769]: E1125 10:01:19.224775 4769 secret.go:188] Couldn't get secret metallb-system/frr-k8s-webhook-server-cert: secret "frr-k8s-webhook-server-cert" not found Nov 25 10:01:19 crc kubenswrapper[4769]: E1125 10:01:19.224852 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8df830ab-ab7a-49cd-b7d4-72d44c99cc4f-cert podName:8df830ab-ab7a-49cd-b7d4-72d44c99cc4f nodeName:}" failed. No retries permitted until 2025-11-25 10:01:19.724828817 +0000 UTC m=+1028.309801130 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/8df830ab-ab7a-49cd-b7d4-72d44c99cc4f-cert") pod "frr-k8s-webhook-server-6998585d5-v89x9" (UID: "8df830ab-ab7a-49cd-b7d4-72d44c99cc4f") : secret "frr-k8s-webhook-server-cert" not found Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.225076 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/aa14b8e2-159e-4850-8816-14bc635838ac-frr-sockets\") pod \"frr-k8s-zwpnn\" (UID: \"aa14b8e2-159e-4850-8816-14bc635838ac\") " pod="metallb-system/frr-k8s-zwpnn" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.245189 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/aa14b8e2-159e-4850-8816-14bc635838ac-metrics-certs\") pod \"frr-k8s-zwpnn\" (UID: \"aa14b8e2-159e-4850-8816-14bc635838ac\") " pod="metallb-system/frr-k8s-zwpnn" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.264102 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jg7z9\" (UniqueName: \"kubernetes.io/projected/8df830ab-ab7a-49cd-b7d4-72d44c99cc4f-kube-api-access-jg7z9\") pod \"frr-k8s-webhook-server-6998585d5-v89x9\" (UID: \"8df830ab-ab7a-49cd-b7d4-72d44c99cc4f\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-v89x9" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.270713 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8mk9x\" (UniqueName: \"kubernetes.io/projected/aa14b8e2-159e-4850-8816-14bc635838ac-kube-api-access-8mk9x\") pod \"frr-k8s-zwpnn\" (UID: \"aa14b8e2-159e-4850-8816-14bc635838ac\") " pod="metallb-system/frr-k8s-zwpnn" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.326386 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/ce9a407b-6a62-48d8-a15d-e08c1b09c3e3-memberlist\") pod \"speaker-84t8t\" (UID: \"ce9a407b-6a62-48d8-a15d-e08c1b09c3e3\") " pod="metallb-system/speaker-84t8t" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.326498 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/d64c9e2e-43f2-4ee9-b377-c32f743c034d-metrics-certs\") pod \"controller-6c7b4b5f48-8k9g6\" (UID: \"d64c9e2e-43f2-4ee9-b377-c32f743c034d\") " pod="metallb-system/controller-6c7b4b5f48-8k9g6" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.326521 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/ce9a407b-6a62-48d8-a15d-e08c1b09c3e3-metallb-excludel2\") pod \"speaker-84t8t\" (UID: \"ce9a407b-6a62-48d8-a15d-e08c1b09c3e3\") " pod="metallb-system/speaker-84t8t" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.326553 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zsj82\" (UniqueName: \"kubernetes.io/projected/ce9a407b-6a62-48d8-a15d-e08c1b09c3e3-kube-api-access-zsj82\") pod \"speaker-84t8t\" (UID: \"ce9a407b-6a62-48d8-a15d-e08c1b09c3e3\") " pod="metallb-system/speaker-84t8t" Nov 25 10:01:19 crc kubenswrapper[4769]: E1125 10:01:19.326553 4769 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.326640 4769 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d64c9e2e-43f2-4ee9-b377-c32f743c034d-cert\") pod \"controller-6c7b4b5f48-8k9g6\" (UID: \"d64c9e2e-43f2-4ee9-b377-c32f743c034d\") " pod="metallb-system/controller-6c7b4b5f48-8k9g6" Nov 25 10:01:19 crc kubenswrapper[4769]: E1125 10:01:19.326675 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ce9a407b-6a62-48d8-a15d-e08c1b09c3e3-memberlist podName:ce9a407b-6a62-48d8-a15d-e08c1b09c3e3 nodeName:}" failed. No retries permitted until 2025-11-25 10:01:19.826654109 +0000 UTC m=+1028.411626422 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/ce9a407b-6a62-48d8-a15d-e08c1b09c3e3-memberlist") pod "speaker-84t8t" (UID: "ce9a407b-6a62-48d8-a15d-e08c1b09c3e3") : secret "metallb-memberlist" not found Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.326804 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ce9a407b-6a62-48d8-a15d-e08c1b09c3e3-metrics-certs\") pod \"speaker-84t8t\" (UID: \"ce9a407b-6a62-48d8-a15d-e08c1b09c3e3\") " pod="metallb-system/speaker-84t8t" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.326845 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ns6kq\" (UniqueName: \"kubernetes.io/projected/d64c9e2e-43f2-4ee9-b377-c32f743c034d-kube-api-access-ns6kq\") pod \"controller-6c7b4b5f48-8k9g6\" (UID: \"d64c9e2e-43f2-4ee9-b377-c32f743c034d\") " pod="metallb-system/controller-6c7b4b5f48-8k9g6" Nov 25 10:01:19 crc kubenswrapper[4769]: E1125 10:01:19.327687 4769 secret.go:188] Couldn't get secret metallb-system/speaker-certs-secret: secret "speaker-certs-secret" not found Nov 25 10:01:19 crc kubenswrapper[4769]: E1125 10:01:19.327736 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ce9a407b-6a62-48d8-a15d-e08c1b09c3e3-metrics-certs podName:ce9a407b-6a62-48d8-a15d-e08c1b09c3e3 nodeName:}" failed. No retries permitted until 2025-11-25 10:01:19.827721549 +0000 UTC m=+1028.412693852 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ce9a407b-6a62-48d8-a15d-e08c1b09c3e3-metrics-certs") pod "speaker-84t8t" (UID: "ce9a407b-6a62-48d8-a15d-e08c1b09c3e3") : secret "speaker-certs-secret" not found Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.328604 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/ce9a407b-6a62-48d8-a15d-e08c1b09c3e3-metallb-excludel2\") pod \"speaker-84t8t\" (UID: \"ce9a407b-6a62-48d8-a15d-e08c1b09c3e3\") " pod="metallb-system/speaker-84t8t" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.353499 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zsj82\" (UniqueName: \"kubernetes.io/projected/ce9a407b-6a62-48d8-a15d-e08c1b09c3e3-kube-api-access-zsj82\") pod \"speaker-84t8t\" (UID: \"ce9a407b-6a62-48d8-a15d-e08c1b09c3e3\") " pod="metallb-system/speaker-84t8t" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.407442 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-zwpnn" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.429289 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/d64c9e2e-43f2-4ee9-b377-c32f743c034d-metrics-certs\") pod \"controller-6c7b4b5f48-8k9g6\" (UID: \"d64c9e2e-43f2-4ee9-b377-c32f743c034d\") " pod="metallb-system/controller-6c7b4b5f48-8k9g6" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.429399 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d64c9e2e-43f2-4ee9-b377-c32f743c034d-cert\") pod \"controller-6c7b4b5f48-8k9g6\" (UID: \"d64c9e2e-43f2-4ee9-b377-c32f743c034d\") " pod="metallb-system/controller-6c7b4b5f48-8k9g6" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.429434 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ns6kq\" (UniqueName: \"kubernetes.io/projected/d64c9e2e-43f2-4ee9-b377-c32f743c034d-kube-api-access-ns6kq\") pod \"controller-6c7b4b5f48-8k9g6\" (UID: \"d64c9e2e-43f2-4ee9-b377-c32f743c034d\") " pod="metallb-system/controller-6c7b4b5f48-8k9g6" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.432389 4769 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.433820 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/d64c9e2e-43f2-4ee9-b377-c32f743c034d-metrics-certs\") pod \"controller-6c7b4b5f48-8k9g6\" (UID: \"d64c9e2e-43f2-4ee9-b377-c32f743c034d\") " pod="metallb-system/controller-6c7b4b5f48-8k9g6" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.445556 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d64c9e2e-43f2-4ee9-b377-c32f743c034d-cert\") pod \"controller-6c7b4b5f48-8k9g6\" (UID: \"d64c9e2e-43f2-4ee9-b377-c32f743c034d\") " pod="metallb-system/controller-6c7b4b5f48-8k9g6" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.449155 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ns6kq\" (UniqueName: \"kubernetes.io/projected/d64c9e2e-43f2-4ee9-b377-c32f743c034d-kube-api-access-ns6kq\") pod \"controller-6c7b4b5f48-8k9g6\" (UID: \"d64c9e2e-43f2-4ee9-b377-c32f743c034d\") " pod="metallb-system/controller-6c7b4b5f48-8k9g6" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.537204 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-8k9g6" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.738712 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8df830ab-ab7a-49cd-b7d4-72d44c99cc4f-cert\") pod \"frr-k8s-webhook-server-6998585d5-v89x9\" (UID: \"8df830ab-ab7a-49cd-b7d4-72d44c99cc4f\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-v89x9" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.745525 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8df830ab-ab7a-49cd-b7d4-72d44c99cc4f-cert\") pod \"frr-k8s-webhook-server-6998585d5-v89x9\" (UID: \"8df830ab-ab7a-49cd-b7d4-72d44c99cc4f\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-v89x9" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.841065 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ce9a407b-6a62-48d8-a15d-e08c1b09c3e3-metrics-certs\") pod \"speaker-84t8t\" (UID: \"ce9a407b-6a62-48d8-a15d-e08c1b09c3e3\") " pod="metallb-system/speaker-84t8t" Nov 25 10:01:19 crc kubenswrapper[4769]: E1125 10:01:19.841367 4769 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 25 10:01:19 crc kubenswrapper[4769]: E1125 10:01:19.841437 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ce9a407b-6a62-48d8-a15d-e08c1b09c3e3-memberlist podName:ce9a407b-6a62-48d8-a15d-e08c1b09c3e3 nodeName:}" failed. No retries permitted until 2025-11-25 10:01:20.841415494 +0000 UTC m=+1029.426387807 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/ce9a407b-6a62-48d8-a15d-e08c1b09c3e3-memberlist") pod "speaker-84t8t" (UID: "ce9a407b-6a62-48d8-a15d-e08c1b09c3e3") : secret "metallb-memberlist" not found Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.841200 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/ce9a407b-6a62-48d8-a15d-e08c1b09c3e3-memberlist\") pod \"speaker-84t8t\" (UID: \"ce9a407b-6a62-48d8-a15d-e08c1b09c3e3\") " pod="metallb-system/speaker-84t8t" Nov 25 10:01:19 crc kubenswrapper[4769]: I1125 10:01:19.844389 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ce9a407b-6a62-48d8-a15d-e08c1b09c3e3-metrics-certs\") pod \"speaker-84t8t\" (UID: \"ce9a407b-6a62-48d8-a15d-e08c1b09c3e3\") " pod="metallb-system/speaker-84t8t" Nov 25 10:01:20 crc kubenswrapper[4769]: I1125 10:01:20.023298 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-8k9g6"] Nov 25 10:01:20 crc kubenswrapper[4769]: I1125 10:01:20.035251 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-v89x9" Nov 25 10:01:20 crc kubenswrapper[4769]: W1125 10:01:20.038704 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd64c9e2e_43f2_4ee9_b377_c32f743c034d.slice/crio-3717ea6f86a57a9054d78953735f3ada3035fdab76bba9d8e4df36fbd041e730 WatchSource:0}: Error finding container 3717ea6f86a57a9054d78953735f3ada3035fdab76bba9d8e4df36fbd041e730: Status 404 returned error can't find the container with id 3717ea6f86a57a9054d78953735f3ada3035fdab76bba9d8e4df36fbd041e730 Nov 25 10:01:20 crc kubenswrapper[4769]: I1125 10:01:20.526190 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-8k9g6" event={"ID":"d64c9e2e-43f2-4ee9-b377-c32f743c034d","Type":"ContainerStarted","Data":"22c69608bd171ac53ac1993d5054aca8559617f9837738b42e79b837637feb16"} Nov 25 10:01:20 crc kubenswrapper[4769]: I1125 10:01:20.527083 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-8k9g6" event={"ID":"d64c9e2e-43f2-4ee9-b377-c32f743c034d","Type":"ContainerStarted","Data":"652accf28a5346e2c58c6ea933e7f09ee768f72e0d825c9e6c7ea7da5c256fe3"} Nov 25 10:01:20 crc kubenswrapper[4769]: I1125 10:01:20.527114 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-8k9g6" event={"ID":"d64c9e2e-43f2-4ee9-b377-c32f743c034d","Type":"ContainerStarted","Data":"3717ea6f86a57a9054d78953735f3ada3035fdab76bba9d8e4df36fbd041e730"} Nov 25 10:01:20 crc kubenswrapper[4769]: I1125 10:01:20.529258 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6c7b4b5f48-8k9g6" Nov 25 10:01:20 crc kubenswrapper[4769]: I1125 10:01:20.534946 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-zwpnn" event={"ID":"aa14b8e2-159e-4850-8816-14bc635838ac","Type":"ContainerStarted","Data":"c6408c2482a2020e8882dc784aea65c5206f4eac7712de0e5e9ca521d345c64e"} Nov 25 10:01:20 crc kubenswrapper[4769]: I1125 10:01:20.551095 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-v89x9"] Nov 25 10:01:20 crc kubenswrapper[4769]: I1125 10:01:20.560882 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6c7b4b5f48-8k9g6" podStartSLOduration=1.56085641 podStartE2EDuration="1.56085641s" podCreationTimestamp="2025-11-25 10:01:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:01:20.554463725 +0000 UTC m=+1029.139436038" watchObservedRunningTime="2025-11-25 10:01:20.56085641 +0000 UTC m=+1029.145828723" Nov 25 10:01:20 crc kubenswrapper[4769]: I1125 10:01:20.871981 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/ce9a407b-6a62-48d8-a15d-e08c1b09c3e3-memberlist\") pod \"speaker-84t8t\" (UID: \"ce9a407b-6a62-48d8-a15d-e08c1b09c3e3\") " pod="metallb-system/speaker-84t8t" Nov 25 10:01:20 crc kubenswrapper[4769]: I1125 10:01:20.879537 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/ce9a407b-6a62-48d8-a15d-e08c1b09c3e3-memberlist\") pod \"speaker-84t8t\" (UID: \"ce9a407b-6a62-48d8-a15d-e08c1b09c3e3\") " pod="metallb-system/speaker-84t8t" Nov 25 10:01:21 crc 
kubenswrapper[4769]: I1125 10:01:21.021455 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-84t8t" Nov 25 10:01:21 crc kubenswrapper[4769]: W1125 10:01:21.062401 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podce9a407b_6a62_48d8_a15d_e08c1b09c3e3.slice/crio-5d863f26f6d14137461d30b09740232daa8160c0c35668da38474614a48f5345 WatchSource:0}: Error finding container 5d863f26f6d14137461d30b09740232daa8160c0c35668da38474614a48f5345: Status 404 returned error can't find the container with id 5d863f26f6d14137461d30b09740232daa8160c0c35668da38474614a48f5345 Nov 25 10:01:21 crc kubenswrapper[4769]: I1125 10:01:21.547736 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-84t8t" event={"ID":"ce9a407b-6a62-48d8-a15d-e08c1b09c3e3","Type":"ContainerStarted","Data":"73fdde22178dd9d7423bb87235407e92c8884fde4edfcc9a1ec0250e214af9ca"} Nov 25 10:01:21 crc kubenswrapper[4769]: I1125 10:01:21.548168 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-84t8t" event={"ID":"ce9a407b-6a62-48d8-a15d-e08c1b09c3e3","Type":"ContainerStarted","Data":"5d863f26f6d14137461d30b09740232daa8160c0c35668da38474614a48f5345"} Nov 25 10:01:21 crc kubenswrapper[4769]: I1125 10:01:21.550953 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-v89x9" event={"ID":"8df830ab-ab7a-49cd-b7d4-72d44c99cc4f","Type":"ContainerStarted","Data":"64edc2b8f3bbacbf5e203249f916db0aba634e069d059b0dd9bd7ea5740fdc6a"} Nov 25 10:01:22 crc kubenswrapper[4769]: I1125 10:01:22.563111 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-84t8t" event={"ID":"ce9a407b-6a62-48d8-a15d-e08c1b09c3e3","Type":"ContainerStarted","Data":"ced4d17ad9dadf122e0e0209a3116d47cf98434b31e97a3973f6d20ecc19db52"} Nov 25 10:01:22 crc kubenswrapper[4769]: I1125 10:01:22.563268 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-84t8t" Nov 25 10:01:22 crc kubenswrapper[4769]: I1125 10:01:22.590698 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-84t8t" podStartSLOduration=3.5906718250000003 podStartE2EDuration="3.590671825s" podCreationTimestamp="2025-11-25 10:01:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:01:22.586993084 +0000 UTC m=+1031.171965397" watchObservedRunningTime="2025-11-25 10:01:22.590671825 +0000 UTC m=+1031.175644138" Nov 25 10:01:28 crc kubenswrapper[4769]: I1125 10:01:28.629409 4769 generic.go:334] "Generic (PLEG): container finished" podID="aa14b8e2-159e-4850-8816-14bc635838ac" containerID="890af1fb92d1cd0a16804610dfe3ddc58dbdcfdfc19e55aa5fb26dd81e3ee7ea" exitCode=0 Nov 25 10:01:28 crc kubenswrapper[4769]: I1125 10:01:28.629498 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-zwpnn" event={"ID":"aa14b8e2-159e-4850-8816-14bc635838ac","Type":"ContainerDied","Data":"890af1fb92d1cd0a16804610dfe3ddc58dbdcfdfc19e55aa5fb26dd81e3ee7ea"} Nov 25 10:01:28 crc kubenswrapper[4769]: I1125 10:01:28.635146 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-v89x9" 
event={"ID":"8df830ab-ab7a-49cd-b7d4-72d44c99cc4f","Type":"ContainerStarted","Data":"e69474b9661a76405886b8eea7c4db6476214e71876ea4c405ad17c8c22cfaae"} Nov 25 10:01:28 crc kubenswrapper[4769]: I1125 10:01:28.635449 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-6998585d5-v89x9" Nov 25 10:01:28 crc kubenswrapper[4769]: I1125 10:01:28.683990 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-6998585d5-v89x9" podStartSLOduration=2.387707451 podStartE2EDuration="9.683943685s" podCreationTimestamp="2025-11-25 10:01:19 +0000 UTC" firstStartedPulling="2025-11-25 10:01:20.545013645 +0000 UTC m=+1029.129985958" lastFinishedPulling="2025-11-25 10:01:27.841249879 +0000 UTC m=+1036.426222192" observedRunningTime="2025-11-25 10:01:28.677036906 +0000 UTC m=+1037.262009209" watchObservedRunningTime="2025-11-25 10:01:28.683943685 +0000 UTC m=+1037.268916008" Nov 25 10:01:29 crc kubenswrapper[4769]: I1125 10:01:29.652832 4769 generic.go:334] "Generic (PLEG): container finished" podID="aa14b8e2-159e-4850-8816-14bc635838ac" containerID="cb2eafa349afb3e477df7deb4ec2799e95619f2cf026bc371be8c9f79324ddb2" exitCode=0 Nov 25 10:01:29 crc kubenswrapper[4769]: I1125 10:01:29.653756 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-zwpnn" event={"ID":"aa14b8e2-159e-4850-8816-14bc635838ac","Type":"ContainerDied","Data":"cb2eafa349afb3e477df7deb4ec2799e95619f2cf026bc371be8c9f79324ddb2"} Nov 25 10:01:30 crc kubenswrapper[4769]: I1125 10:01:30.681783 4769 generic.go:334] "Generic (PLEG): container finished" podID="aa14b8e2-159e-4850-8816-14bc635838ac" containerID="f2248849e1c720c3d684621b64582f2292393843e68b1e6f5d811fbdd9e8ad87" exitCode=0 Nov 25 10:01:30 crc kubenswrapper[4769]: I1125 10:01:30.682189 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-zwpnn" event={"ID":"aa14b8e2-159e-4850-8816-14bc635838ac","Type":"ContainerDied","Data":"f2248849e1c720c3d684621b64582f2292393843e68b1e6f5d811fbdd9e8ad87"} Nov 25 10:01:31 crc kubenswrapper[4769]: I1125 10:01:31.026564 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-84t8t" Nov 25 10:01:31 crc kubenswrapper[4769]: I1125 10:01:31.704098 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-zwpnn" event={"ID":"aa14b8e2-159e-4850-8816-14bc635838ac","Type":"ContainerStarted","Data":"e5619ea5e2a4dbcbae8ca3847b13cb946f315bd80b9c6d60bb3af6409533d3ab"} Nov 25 10:01:31 crc kubenswrapper[4769]: I1125 10:01:31.704149 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-zwpnn" event={"ID":"aa14b8e2-159e-4850-8816-14bc635838ac","Type":"ContainerStarted","Data":"c81c226f72a18205a0828ac64135801d26281630e5bbca951e02c23cb6d9a28d"} Nov 25 10:01:31 crc kubenswrapper[4769]: I1125 10:01:31.704161 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-zwpnn" event={"ID":"aa14b8e2-159e-4850-8816-14bc635838ac","Type":"ContainerStarted","Data":"eacebf587c4a37945b0a75f9c952171dccc4bf4aa24f513067c9acaac9af1c38"} Nov 25 10:01:31 crc kubenswrapper[4769]: I1125 10:01:31.704172 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-zwpnn" event={"ID":"aa14b8e2-159e-4850-8816-14bc635838ac","Type":"ContainerStarted","Data":"c20e734f29dedb8eca4b99b39d4bdf15bd36400abac65a4a867fc9578eb18476"} Nov 25 10:01:31 crc kubenswrapper[4769]: I1125 
10:01:31.704180 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-zwpnn" event={"ID":"aa14b8e2-159e-4850-8816-14bc635838ac","Type":"ContainerStarted","Data":"e879bd03a93ac608b9810776ec266b1caf90cb4fe20a3a3568c2e8396d5ef907"} Nov 25 10:01:32 crc kubenswrapper[4769]: I1125 10:01:32.718525 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-zwpnn" event={"ID":"aa14b8e2-159e-4850-8816-14bc635838ac","Type":"ContainerStarted","Data":"f79dab082d1c34c912694d1cd5fe2c842d025e4e113455ab61daa180c521c36c"} Nov 25 10:01:32 crc kubenswrapper[4769]: I1125 10:01:32.719074 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-zwpnn" Nov 25 10:01:32 crc kubenswrapper[4769]: I1125 10:01:32.748055 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-zwpnn" podStartSLOduration=5.470992767 podStartE2EDuration="13.748034304s" podCreationTimestamp="2025-11-25 10:01:19 +0000 UTC" firstStartedPulling="2025-11-25 10:01:19.571301317 +0000 UTC m=+1028.156273630" lastFinishedPulling="2025-11-25 10:01:27.848342854 +0000 UTC m=+1036.433315167" observedRunningTime="2025-11-25 10:01:32.745870337 +0000 UTC m=+1041.330842670" watchObservedRunningTime="2025-11-25 10:01:32.748034304 +0000 UTC m=+1041.333006617" Nov 25 10:01:33 crc kubenswrapper[4769]: I1125 10:01:33.993955 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-lvrnx"] Nov 25 10:01:33 crc kubenswrapper[4769]: I1125 10:01:33.995792 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-lvrnx" Nov 25 10:01:34 crc kubenswrapper[4769]: I1125 10:01:34.000336 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Nov 25 10:01:34 crc kubenswrapper[4769]: I1125 10:01:34.001954 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Nov 25 10:01:34 crc kubenswrapper[4769]: I1125 10:01:34.009921 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-lvrnx"] Nov 25 10:01:34 crc kubenswrapper[4769]: I1125 10:01:34.017793 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-mp9wj" Nov 25 10:01:34 crc kubenswrapper[4769]: I1125 10:01:34.126590 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xgnl4\" (UniqueName: \"kubernetes.io/projected/62813fad-34fd-4cad-a5f6-757978da0d2f-kube-api-access-xgnl4\") pod \"openstack-operator-index-lvrnx\" (UID: \"62813fad-34fd-4cad-a5f6-757978da0d2f\") " pod="openstack-operators/openstack-operator-index-lvrnx" Nov 25 10:01:34 crc kubenswrapper[4769]: I1125 10:01:34.229080 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xgnl4\" (UniqueName: \"kubernetes.io/projected/62813fad-34fd-4cad-a5f6-757978da0d2f-kube-api-access-xgnl4\") pod \"openstack-operator-index-lvrnx\" (UID: \"62813fad-34fd-4cad-a5f6-757978da0d2f\") " pod="openstack-operators/openstack-operator-index-lvrnx" Nov 25 10:01:34 crc kubenswrapper[4769]: I1125 10:01:34.251393 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xgnl4\" (UniqueName: 
\"kubernetes.io/projected/62813fad-34fd-4cad-a5f6-757978da0d2f-kube-api-access-xgnl4\") pod \"openstack-operator-index-lvrnx\" (UID: \"62813fad-34fd-4cad-a5f6-757978da0d2f\") " pod="openstack-operators/openstack-operator-index-lvrnx" Nov 25 10:01:34 crc kubenswrapper[4769]: I1125 10:01:34.336613 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-lvrnx" Nov 25 10:01:34 crc kubenswrapper[4769]: I1125 10:01:34.408155 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-zwpnn" Nov 25 10:01:34 crc kubenswrapper[4769]: I1125 10:01:34.476394 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-zwpnn" Nov 25 10:01:34 crc kubenswrapper[4769]: I1125 10:01:34.818206 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-lvrnx"] Nov 25 10:01:35 crc kubenswrapper[4769]: I1125 10:01:35.751350 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-lvrnx" event={"ID":"62813fad-34fd-4cad-a5f6-757978da0d2f","Type":"ContainerStarted","Data":"971dd569b7fa1009c6e1fd88effd5bd87b540f6f22777ec647013d1fdbe4ff9c"} Nov 25 10:01:37 crc kubenswrapper[4769]: I1125 10:01:37.371384 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-lvrnx"] Nov 25 10:01:37 crc kubenswrapper[4769]: I1125 10:01:37.977608 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-qln84"] Nov 25 10:01:37 crc kubenswrapper[4769]: I1125 10:01:37.980918 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-qln84" Nov 25 10:01:37 crc kubenswrapper[4769]: I1125 10:01:37.993179 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-qln84"] Nov 25 10:01:38 crc kubenswrapper[4769]: I1125 10:01:38.122153 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k5xpq\" (UniqueName: \"kubernetes.io/projected/eeb84a4b-8771-40e8-842b-9a67b1044074-kube-api-access-k5xpq\") pod \"openstack-operator-index-qln84\" (UID: \"eeb84a4b-8771-40e8-842b-9a67b1044074\") " pod="openstack-operators/openstack-operator-index-qln84" Nov 25 10:01:38 crc kubenswrapper[4769]: I1125 10:01:38.224166 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k5xpq\" (UniqueName: \"kubernetes.io/projected/eeb84a4b-8771-40e8-842b-9a67b1044074-kube-api-access-k5xpq\") pod \"openstack-operator-index-qln84\" (UID: \"eeb84a4b-8771-40e8-842b-9a67b1044074\") " pod="openstack-operators/openstack-operator-index-qln84" Nov 25 10:01:38 crc kubenswrapper[4769]: I1125 10:01:38.246920 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k5xpq\" (UniqueName: \"kubernetes.io/projected/eeb84a4b-8771-40e8-842b-9a67b1044074-kube-api-access-k5xpq\") pod \"openstack-operator-index-qln84\" (UID: \"eeb84a4b-8771-40e8-842b-9a67b1044074\") " pod="openstack-operators/openstack-operator-index-qln84" Nov 25 10:01:38 crc kubenswrapper[4769]: I1125 10:01:38.319855 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-qln84" Nov 25 10:01:39 crc kubenswrapper[4769]: I1125 10:01:39.547023 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6c7b4b5f48-8k9g6" Nov 25 10:01:39 crc kubenswrapper[4769]: W1125 10:01:39.800832 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podeeb84a4b_8771_40e8_842b_9a67b1044074.slice/crio-deaf30b63de6485a976d4ba35a65f9b6605ec24bfd1dd045e20150cc1dbb1755 WatchSource:0}: Error finding container deaf30b63de6485a976d4ba35a65f9b6605ec24bfd1dd045e20150cc1dbb1755: Status 404 returned error can't find the container with id deaf30b63de6485a976d4ba35a65f9b6605ec24bfd1dd045e20150cc1dbb1755 Nov 25 10:01:39 crc kubenswrapper[4769]: I1125 10:01:39.801092 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-lvrnx" event={"ID":"62813fad-34fd-4cad-a5f6-757978da0d2f","Type":"ContainerStarted","Data":"972b8eb7805f128ff1184192542217fbf298232c6f1c684f798741663a582e27"} Nov 25 10:01:39 crc kubenswrapper[4769]: I1125 10:01:39.801336 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-lvrnx" podUID="62813fad-34fd-4cad-a5f6-757978da0d2f" containerName="registry-server" containerID="cri-o://972b8eb7805f128ff1184192542217fbf298232c6f1c684f798741663a582e27" gracePeriod=2 Nov 25 10:01:39 crc kubenswrapper[4769]: I1125 10:01:39.805276 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-qln84"] Nov 25 10:01:39 crc kubenswrapper[4769]: I1125 10:01:39.846680 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-lvrnx" podStartSLOduration=2.385725856 podStartE2EDuration="6.846651044s" podCreationTimestamp="2025-11-25 10:01:33 +0000 UTC" firstStartedPulling="2025-11-25 10:01:34.851933824 +0000 UTC m=+1043.436906167" lastFinishedPulling="2025-11-25 10:01:39.312859022 +0000 UTC m=+1047.897831355" observedRunningTime="2025-11-25 10:01:39.840597104 +0000 UTC m=+1048.425569427" watchObservedRunningTime="2025-11-25 10:01:39.846651044 +0000 UTC m=+1048.431623367" Nov 25 10:01:40 crc kubenswrapper[4769]: I1125 10:01:40.043437 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-6998585d5-v89x9" Nov 25 10:01:40 crc kubenswrapper[4769]: I1125 10:01:40.221433 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-lvrnx" Nov 25 10:01:40 crc kubenswrapper[4769]: I1125 10:01:40.380048 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xgnl4\" (UniqueName: \"kubernetes.io/projected/62813fad-34fd-4cad-a5f6-757978da0d2f-kube-api-access-xgnl4\") pod \"62813fad-34fd-4cad-a5f6-757978da0d2f\" (UID: \"62813fad-34fd-4cad-a5f6-757978da0d2f\") " Nov 25 10:01:40 crc kubenswrapper[4769]: I1125 10:01:40.390324 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/62813fad-34fd-4cad-a5f6-757978da0d2f-kube-api-access-xgnl4" (OuterVolumeSpecName: "kube-api-access-xgnl4") pod "62813fad-34fd-4cad-a5f6-757978da0d2f" (UID: "62813fad-34fd-4cad-a5f6-757978da0d2f"). InnerVolumeSpecName "kube-api-access-xgnl4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:01:40 crc kubenswrapper[4769]: I1125 10:01:40.482725 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xgnl4\" (UniqueName: \"kubernetes.io/projected/62813fad-34fd-4cad-a5f6-757978da0d2f-kube-api-access-xgnl4\") on node \"crc\" DevicePath \"\"" Nov 25 10:01:40 crc kubenswrapper[4769]: I1125 10:01:40.812620 4769 generic.go:334] "Generic (PLEG): container finished" podID="62813fad-34fd-4cad-a5f6-757978da0d2f" containerID="972b8eb7805f128ff1184192542217fbf298232c6f1c684f798741663a582e27" exitCode=0 Nov 25 10:01:40 crc kubenswrapper[4769]: I1125 10:01:40.812702 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-lvrnx" Nov 25 10:01:40 crc kubenswrapper[4769]: I1125 10:01:40.812692 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-lvrnx" event={"ID":"62813fad-34fd-4cad-a5f6-757978da0d2f","Type":"ContainerDied","Data":"972b8eb7805f128ff1184192542217fbf298232c6f1c684f798741663a582e27"} Nov 25 10:01:40 crc kubenswrapper[4769]: I1125 10:01:40.812863 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-lvrnx" event={"ID":"62813fad-34fd-4cad-a5f6-757978da0d2f","Type":"ContainerDied","Data":"971dd569b7fa1009c6e1fd88effd5bd87b540f6f22777ec647013d1fdbe4ff9c"} Nov 25 10:01:40 crc kubenswrapper[4769]: I1125 10:01:40.812905 4769 scope.go:117] "RemoveContainer" containerID="972b8eb7805f128ff1184192542217fbf298232c6f1c684f798741663a582e27" Nov 25 10:01:40 crc kubenswrapper[4769]: I1125 10:01:40.814650 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-qln84" event={"ID":"eeb84a4b-8771-40e8-842b-9a67b1044074","Type":"ContainerStarted","Data":"37db3b3672bc2296af90f4adf967980a44231c7becc4b28a291e319a70fc934a"} Nov 25 10:01:40 crc kubenswrapper[4769]: I1125 10:01:40.814724 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-qln84" event={"ID":"eeb84a4b-8771-40e8-842b-9a67b1044074","Type":"ContainerStarted","Data":"deaf30b63de6485a976d4ba35a65f9b6605ec24bfd1dd045e20150cc1dbb1755"} Nov 25 10:01:40 crc kubenswrapper[4769]: I1125 10:01:40.841642 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-qln84" podStartSLOduration=3.779375993 podStartE2EDuration="3.841622808s" podCreationTimestamp="2025-11-25 10:01:37 +0000 UTC" firstStartedPulling="2025-11-25 10:01:39.806930904 +0000 UTC m=+1048.391903237" lastFinishedPulling="2025-11-25 10:01:39.869177729 +0000 UTC m=+1048.454150052" observedRunningTime="2025-11-25 10:01:40.83490157 +0000 UTC m=+1049.419873883" watchObservedRunningTime="2025-11-25 10:01:40.841622808 +0000 UTC m=+1049.426595121" Nov 25 10:01:40 crc kubenswrapper[4769]: I1125 10:01:40.842729 4769 scope.go:117] "RemoveContainer" containerID="972b8eb7805f128ff1184192542217fbf298232c6f1c684f798741663a582e27" Nov 25 10:01:40 crc kubenswrapper[4769]: E1125 10:01:40.843341 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"972b8eb7805f128ff1184192542217fbf298232c6f1c684f798741663a582e27\": container with ID starting with 972b8eb7805f128ff1184192542217fbf298232c6f1c684f798741663a582e27 not found: ID does not exist" 
containerID="972b8eb7805f128ff1184192542217fbf298232c6f1c684f798741663a582e27" Nov 25 10:01:40 crc kubenswrapper[4769]: I1125 10:01:40.843417 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"972b8eb7805f128ff1184192542217fbf298232c6f1c684f798741663a582e27"} err="failed to get container status \"972b8eb7805f128ff1184192542217fbf298232c6f1c684f798741663a582e27\": rpc error: code = NotFound desc = could not find container \"972b8eb7805f128ff1184192542217fbf298232c6f1c684f798741663a582e27\": container with ID starting with 972b8eb7805f128ff1184192542217fbf298232c6f1c684f798741663a582e27 not found: ID does not exist" Nov 25 10:01:40 crc kubenswrapper[4769]: I1125 10:01:40.859495 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-lvrnx"] Nov 25 10:01:40 crc kubenswrapper[4769]: I1125 10:01:40.867837 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-lvrnx"] Nov 25 10:01:42 crc kubenswrapper[4769]: I1125 10:01:42.255862 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="62813fad-34fd-4cad-a5f6-757978da0d2f" path="/var/lib/kubelet/pods/62813fad-34fd-4cad-a5f6-757978da0d2f/volumes" Nov 25 10:01:48 crc kubenswrapper[4769]: I1125 10:01:48.320621 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-qln84" Nov 25 10:01:48 crc kubenswrapper[4769]: I1125 10:01:48.321647 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-qln84" Nov 25 10:01:48 crc kubenswrapper[4769]: I1125 10:01:48.375544 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-qln84" Nov 25 10:01:48 crc kubenswrapper[4769]: I1125 10:01:48.944852 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-qln84" Nov 25 10:01:49 crc kubenswrapper[4769]: I1125 10:01:49.411915 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-zwpnn" Nov 25 10:01:55 crc kubenswrapper[4769]: I1125 10:01:55.835475 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/737bed4169c94da5e324fc8d9e35ae60aa44caf94ad8b2e26d4bf5d2436g8xs"] Nov 25 10:01:55 crc kubenswrapper[4769]: E1125 10:01:55.836486 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62813fad-34fd-4cad-a5f6-757978da0d2f" containerName="registry-server" Nov 25 10:01:55 crc kubenswrapper[4769]: I1125 10:01:55.836500 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="62813fad-34fd-4cad-a5f6-757978da0d2f" containerName="registry-server" Nov 25 10:01:55 crc kubenswrapper[4769]: I1125 10:01:55.836670 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="62813fad-34fd-4cad-a5f6-757978da0d2f" containerName="registry-server" Nov 25 10:01:55 crc kubenswrapper[4769]: I1125 10:01:55.838072 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/737bed4169c94da5e324fc8d9e35ae60aa44caf94ad8b2e26d4bf5d2436g8xs" Nov 25 10:01:55 crc kubenswrapper[4769]: I1125 10:01:55.841342 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-djvv5" Nov 25 10:01:55 crc kubenswrapper[4769]: I1125 10:01:55.848056 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/737bed4169c94da5e324fc8d9e35ae60aa44caf94ad8b2e26d4bf5d2436g8xs"] Nov 25 10:01:55 crc kubenswrapper[4769]: I1125 10:01:55.966139 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zx7wl\" (UniqueName: \"kubernetes.io/projected/3069ed26-1a81-4909-b213-2b01fa737dd3-kube-api-access-zx7wl\") pod \"737bed4169c94da5e324fc8d9e35ae60aa44caf94ad8b2e26d4bf5d2436g8xs\" (UID: \"3069ed26-1a81-4909-b213-2b01fa737dd3\") " pod="openstack-operators/737bed4169c94da5e324fc8d9e35ae60aa44caf94ad8b2e26d4bf5d2436g8xs" Nov 25 10:01:55 crc kubenswrapper[4769]: I1125 10:01:55.966209 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/3069ed26-1a81-4909-b213-2b01fa737dd3-util\") pod \"737bed4169c94da5e324fc8d9e35ae60aa44caf94ad8b2e26d4bf5d2436g8xs\" (UID: \"3069ed26-1a81-4909-b213-2b01fa737dd3\") " pod="openstack-operators/737bed4169c94da5e324fc8d9e35ae60aa44caf94ad8b2e26d4bf5d2436g8xs" Nov 25 10:01:55 crc kubenswrapper[4769]: I1125 10:01:55.966402 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/3069ed26-1a81-4909-b213-2b01fa737dd3-bundle\") pod \"737bed4169c94da5e324fc8d9e35ae60aa44caf94ad8b2e26d4bf5d2436g8xs\" (UID: \"3069ed26-1a81-4909-b213-2b01fa737dd3\") " pod="openstack-operators/737bed4169c94da5e324fc8d9e35ae60aa44caf94ad8b2e26d4bf5d2436g8xs" Nov 25 10:01:56 crc kubenswrapper[4769]: I1125 10:01:56.069733 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/3069ed26-1a81-4909-b213-2b01fa737dd3-bundle\") pod \"737bed4169c94da5e324fc8d9e35ae60aa44caf94ad8b2e26d4bf5d2436g8xs\" (UID: \"3069ed26-1a81-4909-b213-2b01fa737dd3\") " pod="openstack-operators/737bed4169c94da5e324fc8d9e35ae60aa44caf94ad8b2e26d4bf5d2436g8xs" Nov 25 10:01:56 crc kubenswrapper[4769]: I1125 10:01:56.070095 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zx7wl\" (UniqueName: \"kubernetes.io/projected/3069ed26-1a81-4909-b213-2b01fa737dd3-kube-api-access-zx7wl\") pod \"737bed4169c94da5e324fc8d9e35ae60aa44caf94ad8b2e26d4bf5d2436g8xs\" (UID: \"3069ed26-1a81-4909-b213-2b01fa737dd3\") " pod="openstack-operators/737bed4169c94da5e324fc8d9e35ae60aa44caf94ad8b2e26d4bf5d2436g8xs" Nov 25 10:01:56 crc kubenswrapper[4769]: I1125 10:01:56.070212 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/3069ed26-1a81-4909-b213-2b01fa737dd3-util\") pod \"737bed4169c94da5e324fc8d9e35ae60aa44caf94ad8b2e26d4bf5d2436g8xs\" (UID: \"3069ed26-1a81-4909-b213-2b01fa737dd3\") " pod="openstack-operators/737bed4169c94da5e324fc8d9e35ae60aa44caf94ad8b2e26d4bf5d2436g8xs" Nov 25 10:01:56 crc kubenswrapper[4769]: I1125 10:01:56.070270 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/3069ed26-1a81-4909-b213-2b01fa737dd3-bundle\") pod \"737bed4169c94da5e324fc8d9e35ae60aa44caf94ad8b2e26d4bf5d2436g8xs\" (UID: \"3069ed26-1a81-4909-b213-2b01fa737dd3\") " pod="openstack-operators/737bed4169c94da5e324fc8d9e35ae60aa44caf94ad8b2e26d4bf5d2436g8xs" Nov 25 10:01:56 crc kubenswrapper[4769]: I1125 10:01:56.070666 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/3069ed26-1a81-4909-b213-2b01fa737dd3-util\") pod \"737bed4169c94da5e324fc8d9e35ae60aa44caf94ad8b2e26d4bf5d2436g8xs\" (UID: \"3069ed26-1a81-4909-b213-2b01fa737dd3\") " pod="openstack-operators/737bed4169c94da5e324fc8d9e35ae60aa44caf94ad8b2e26d4bf5d2436g8xs" Nov 25 10:01:56 crc kubenswrapper[4769]: I1125 10:01:56.092483 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zx7wl\" (UniqueName: \"kubernetes.io/projected/3069ed26-1a81-4909-b213-2b01fa737dd3-kube-api-access-zx7wl\") pod \"737bed4169c94da5e324fc8d9e35ae60aa44caf94ad8b2e26d4bf5d2436g8xs\" (UID: \"3069ed26-1a81-4909-b213-2b01fa737dd3\") " pod="openstack-operators/737bed4169c94da5e324fc8d9e35ae60aa44caf94ad8b2e26d4bf5d2436g8xs" Nov 25 10:01:56 crc kubenswrapper[4769]: I1125 10:01:56.164791 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/737bed4169c94da5e324fc8d9e35ae60aa44caf94ad8b2e26d4bf5d2436g8xs" Nov 25 10:01:56 crc kubenswrapper[4769]: I1125 10:01:56.662316 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/737bed4169c94da5e324fc8d9e35ae60aa44caf94ad8b2e26d4bf5d2436g8xs"] Nov 25 10:01:56 crc kubenswrapper[4769]: W1125 10:01:56.675231 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3069ed26_1a81_4909_b213_2b01fa737dd3.slice/crio-88073c769d889598b23ac3a65dee5e6be07d46ef83a984e49f0fedf6c4d9f715 WatchSource:0}: Error finding container 88073c769d889598b23ac3a65dee5e6be07d46ef83a984e49f0fedf6c4d9f715: Status 404 returned error can't find the container with id 88073c769d889598b23ac3a65dee5e6be07d46ef83a984e49f0fedf6c4d9f715 Nov 25 10:01:56 crc kubenswrapper[4769]: I1125 10:01:56.999257 4769 generic.go:334] "Generic (PLEG): container finished" podID="3069ed26-1a81-4909-b213-2b01fa737dd3" containerID="3dae53d45a0374023e0455efd751b1232c0093173f9c07d4be3d8fe7f3c1af12" exitCode=0 Nov 25 10:01:56 crc kubenswrapper[4769]: I1125 10:01:56.999319 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/737bed4169c94da5e324fc8d9e35ae60aa44caf94ad8b2e26d4bf5d2436g8xs" event={"ID":"3069ed26-1a81-4909-b213-2b01fa737dd3","Type":"ContainerDied","Data":"3dae53d45a0374023e0455efd751b1232c0093173f9c07d4be3d8fe7f3c1af12"} Nov 25 10:01:57 crc kubenswrapper[4769]: I1125 10:01:56.999356 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/737bed4169c94da5e324fc8d9e35ae60aa44caf94ad8b2e26d4bf5d2436g8xs" event={"ID":"3069ed26-1a81-4909-b213-2b01fa737dd3","Type":"ContainerStarted","Data":"88073c769d889598b23ac3a65dee5e6be07d46ef83a984e49f0fedf6c4d9f715"} Nov 25 10:01:58 crc kubenswrapper[4769]: I1125 10:01:58.010483 4769 generic.go:334] "Generic (PLEG): container finished" podID="3069ed26-1a81-4909-b213-2b01fa737dd3" containerID="119b2b7427e9ee4385be2a518dd818b5aab9140a6beba36e837e427b35effd51" exitCode=0 Nov 25 10:01:58 crc kubenswrapper[4769]: I1125 10:01:58.010820 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/737bed4169c94da5e324fc8d9e35ae60aa44caf94ad8b2e26d4bf5d2436g8xs" event={"ID":"3069ed26-1a81-4909-b213-2b01fa737dd3","Type":"ContainerDied","Data":"119b2b7427e9ee4385be2a518dd818b5aab9140a6beba36e837e427b35effd51"} Nov 25 10:01:59 crc kubenswrapper[4769]: I1125 10:01:59.027490 4769 generic.go:334] "Generic (PLEG): container finished" podID="3069ed26-1a81-4909-b213-2b01fa737dd3" containerID="6d3de94fcf8100d06e37cdea240001184f6a4cb4ec3918c1dd26217123ff6032" exitCode=0 Nov 25 10:01:59 crc kubenswrapper[4769]: I1125 10:01:59.027586 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/737bed4169c94da5e324fc8d9e35ae60aa44caf94ad8b2e26d4bf5d2436g8xs" event={"ID":"3069ed26-1a81-4909-b213-2b01fa737dd3","Type":"ContainerDied","Data":"6d3de94fcf8100d06e37cdea240001184f6a4cb4ec3918c1dd26217123ff6032"} Nov 25 10:02:00 crc kubenswrapper[4769]: I1125 10:02:00.509287 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/737bed4169c94da5e324fc8d9e35ae60aa44caf94ad8b2e26d4bf5d2436g8xs" Nov 25 10:02:00 crc kubenswrapper[4769]: I1125 10:02:00.570807 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/3069ed26-1a81-4909-b213-2b01fa737dd3-util\") pod \"3069ed26-1a81-4909-b213-2b01fa737dd3\" (UID: \"3069ed26-1a81-4909-b213-2b01fa737dd3\") " Nov 25 10:02:00 crc kubenswrapper[4769]: I1125 10:02:00.571484 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zx7wl\" (UniqueName: \"kubernetes.io/projected/3069ed26-1a81-4909-b213-2b01fa737dd3-kube-api-access-zx7wl\") pod \"3069ed26-1a81-4909-b213-2b01fa737dd3\" (UID: \"3069ed26-1a81-4909-b213-2b01fa737dd3\") " Nov 25 10:02:00 crc kubenswrapper[4769]: I1125 10:02:00.571836 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/3069ed26-1a81-4909-b213-2b01fa737dd3-bundle\") pod \"3069ed26-1a81-4909-b213-2b01fa737dd3\" (UID: \"3069ed26-1a81-4909-b213-2b01fa737dd3\") " Nov 25 10:02:00 crc kubenswrapper[4769]: I1125 10:02:00.572698 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3069ed26-1a81-4909-b213-2b01fa737dd3-bundle" (OuterVolumeSpecName: "bundle") pod "3069ed26-1a81-4909-b213-2b01fa737dd3" (UID: "3069ed26-1a81-4909-b213-2b01fa737dd3"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:02:00 crc kubenswrapper[4769]: I1125 10:02:00.582089 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3069ed26-1a81-4909-b213-2b01fa737dd3-kube-api-access-zx7wl" (OuterVolumeSpecName: "kube-api-access-zx7wl") pod "3069ed26-1a81-4909-b213-2b01fa737dd3" (UID: "3069ed26-1a81-4909-b213-2b01fa737dd3"). InnerVolumeSpecName "kube-api-access-zx7wl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:02:00 crc kubenswrapper[4769]: I1125 10:02:00.601031 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3069ed26-1a81-4909-b213-2b01fa737dd3-util" (OuterVolumeSpecName: "util") pod "3069ed26-1a81-4909-b213-2b01fa737dd3" (UID: "3069ed26-1a81-4909-b213-2b01fa737dd3"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:02:00 crc kubenswrapper[4769]: I1125 10:02:00.675109 4769 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/3069ed26-1a81-4909-b213-2b01fa737dd3-util\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:00 crc kubenswrapper[4769]: I1125 10:02:00.675177 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zx7wl\" (UniqueName: \"kubernetes.io/projected/3069ed26-1a81-4909-b213-2b01fa737dd3-kube-api-access-zx7wl\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:00 crc kubenswrapper[4769]: I1125 10:02:00.675200 4769 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/3069ed26-1a81-4909-b213-2b01fa737dd3-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:01 crc kubenswrapper[4769]: I1125 10:02:01.057988 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/737bed4169c94da5e324fc8d9e35ae60aa44caf94ad8b2e26d4bf5d2436g8xs" event={"ID":"3069ed26-1a81-4909-b213-2b01fa737dd3","Type":"ContainerDied","Data":"88073c769d889598b23ac3a65dee5e6be07d46ef83a984e49f0fedf6c4d9f715"} Nov 25 10:02:01 crc kubenswrapper[4769]: I1125 10:02:01.058061 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="88073c769d889598b23ac3a65dee5e6be07d46ef83a984e49f0fedf6c4d9f715" Nov 25 10:02:01 crc kubenswrapper[4769]: I1125 10:02:01.058068 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/737bed4169c94da5e324fc8d9e35ae60aa44caf94ad8b2e26d4bf5d2436g8xs" Nov 25 10:02:08 crc kubenswrapper[4769]: I1125 10:02:08.578912 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-7fb4d7987d-w6ztr"] Nov 25 10:02:08 crc kubenswrapper[4769]: E1125 10:02:08.580239 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3069ed26-1a81-4909-b213-2b01fa737dd3" containerName="util" Nov 25 10:02:08 crc kubenswrapper[4769]: I1125 10:02:08.580261 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="3069ed26-1a81-4909-b213-2b01fa737dd3" containerName="util" Nov 25 10:02:08 crc kubenswrapper[4769]: E1125 10:02:08.580308 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3069ed26-1a81-4909-b213-2b01fa737dd3" containerName="pull" Nov 25 10:02:08 crc kubenswrapper[4769]: I1125 10:02:08.580316 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="3069ed26-1a81-4909-b213-2b01fa737dd3" containerName="pull" Nov 25 10:02:08 crc kubenswrapper[4769]: E1125 10:02:08.580335 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3069ed26-1a81-4909-b213-2b01fa737dd3" containerName="extract" Nov 25 10:02:08 crc kubenswrapper[4769]: I1125 10:02:08.580342 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="3069ed26-1a81-4909-b213-2b01fa737dd3" containerName="extract" Nov 25 10:02:08 crc kubenswrapper[4769]: I1125 10:02:08.580505 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="3069ed26-1a81-4909-b213-2b01fa737dd3" containerName="extract" Nov 25 10:02:08 crc kubenswrapper[4769]: I1125 10:02:08.581184 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-7fb4d7987d-w6ztr" Nov 25 10:02:08 crc kubenswrapper[4769]: W1125 10:02:08.582801 4769 reflector.go:561] object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-b7bcg": failed to list *v1.Secret: secrets "openstack-operator-controller-operator-dockercfg-b7bcg" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openstack-operators": no relationship found between node 'crc' and this object Nov 25 10:02:08 crc kubenswrapper[4769]: E1125 10:02:08.582864 4769 reflector.go:158] "Unhandled Error" err="object-\"openstack-operators\"/\"openstack-operator-controller-operator-dockercfg-b7bcg\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"openstack-operator-controller-operator-dockercfg-b7bcg\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openstack-operators\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 10:02:08 crc kubenswrapper[4769]: I1125 10:02:08.599143 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-7fb4d7987d-w6ztr"] Nov 25 10:02:08 crc kubenswrapper[4769]: I1125 10:02:08.663379 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mtt9k\" (UniqueName: \"kubernetes.io/projected/6520a852-60ef-47d1-800b-633eae1655dd-kube-api-access-mtt9k\") pod \"openstack-operator-controller-operator-7fb4d7987d-w6ztr\" (UID: \"6520a852-60ef-47d1-800b-633eae1655dd\") " pod="openstack-operators/openstack-operator-controller-operator-7fb4d7987d-w6ztr" Nov 25 10:02:08 crc kubenswrapper[4769]: I1125 10:02:08.764864 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mtt9k\" (UniqueName: \"kubernetes.io/projected/6520a852-60ef-47d1-800b-633eae1655dd-kube-api-access-mtt9k\") pod \"openstack-operator-controller-operator-7fb4d7987d-w6ztr\" (UID: \"6520a852-60ef-47d1-800b-633eae1655dd\") " pod="openstack-operators/openstack-operator-controller-operator-7fb4d7987d-w6ztr" Nov 25 10:02:08 crc kubenswrapper[4769]: I1125 10:02:08.786389 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mtt9k\" (UniqueName: \"kubernetes.io/projected/6520a852-60ef-47d1-800b-633eae1655dd-kube-api-access-mtt9k\") pod \"openstack-operator-controller-operator-7fb4d7987d-w6ztr\" (UID: \"6520a852-60ef-47d1-800b-633eae1655dd\") " pod="openstack-operators/openstack-operator-controller-operator-7fb4d7987d-w6ztr" Nov 25 10:02:09 crc kubenswrapper[4769]: I1125 10:02:09.812192 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-b7bcg" Nov 25 10:02:09 crc kubenswrapper[4769]: I1125 10:02:09.819842 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-7fb4d7987d-w6ztr" Nov 25 10:02:10 crc kubenswrapper[4769]: I1125 10:02:10.365232 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-7fb4d7987d-w6ztr"] Nov 25 10:02:11 crc kubenswrapper[4769]: I1125 10:02:11.178545 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-7fb4d7987d-w6ztr" event={"ID":"6520a852-60ef-47d1-800b-633eae1655dd","Type":"ContainerStarted","Data":"92b122eb9f83c9c9ff8cc4ced4cbfb123a9d70e0133198e42b0fbb075178d687"} Nov 25 10:02:15 crc kubenswrapper[4769]: I1125 10:02:15.223646 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-7fb4d7987d-w6ztr" event={"ID":"6520a852-60ef-47d1-800b-633eae1655dd","Type":"ContainerStarted","Data":"25b080e51b398419543c27866dcc0fd85787fc6c99edbd8530e8e717eea1ab53"} Nov 25 10:02:15 crc kubenswrapper[4769]: I1125 10:02:15.224483 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-7fb4d7987d-w6ztr" Nov 25 10:02:15 crc kubenswrapper[4769]: I1125 10:02:15.263452 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-7fb4d7987d-w6ztr" podStartSLOduration=3.1055545430000002 podStartE2EDuration="7.263427205s" podCreationTimestamp="2025-11-25 10:02:08 +0000 UTC" firstStartedPulling="2025-11-25 10:02:10.375062825 +0000 UTC m=+1078.960035138" lastFinishedPulling="2025-11-25 10:02:14.532935477 +0000 UTC m=+1083.117907800" observedRunningTime="2025-11-25 10:02:15.258085163 +0000 UTC m=+1083.843057486" watchObservedRunningTime="2025-11-25 10:02:15.263427205 +0000 UTC m=+1083.848399528" Nov 25 10:02:19 crc kubenswrapper[4769]: I1125 10:02:19.823018 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-7fb4d7987d-w6ztr" Nov 25 10:02:22 crc kubenswrapper[4769]: I1125 10:02:22.290607 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:02:22 crc kubenswrapper[4769]: I1125 10:02:22.290945 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:02:52 crc kubenswrapper[4769]: I1125 10:02:52.290369 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:02:52 crc kubenswrapper[4769]: I1125 10:02:52.290985 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": 
dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.661368 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-86dc4d89c8-zv5xr"] Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.664902 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-zv5xr" Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.676479 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-79856dc55c-2hp9n"] Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.678158 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-2hp9n" Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.683423 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-2dhrc" Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.684233 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-79856dc55c-2hp9n"] Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.692184 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-whql5" Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.730505 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-86dc4d89c8-zv5xr"] Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.740751 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-7d695c9b56-7xcjk"] Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.744205 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7xcjk" Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.751999 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-cvdml" Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.766049 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vdjxf\" (UniqueName: \"kubernetes.io/projected/aaa65e3e-75e2-4f50-b9d6-aa9710a6e394-kube-api-access-vdjxf\") pod \"cinder-operator-controller-manager-79856dc55c-2hp9n\" (UID: \"aaa65e3e-75e2-4f50-b9d6-aa9710a6e394\") " pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-2hp9n" Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.766336 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xhsxn\" (UniqueName: \"kubernetes.io/projected/b90cc789-8211-48bc-85cc-1a31ad1af486-kube-api-access-xhsxn\") pod \"barbican-operator-controller-manager-86dc4d89c8-zv5xr\" (UID: \"b90cc789-8211-48bc-85cc-1a31ad1af486\") " pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-zv5xr" Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.766363 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8b2sq\" (UniqueName: \"kubernetes.io/projected/0f8a8be0-04e6-4ef8-b3ce-fb6ced595cbd-kube-api-access-8b2sq\") pod \"designate-operator-controller-manager-7d695c9b56-7xcjk\" (UID: \"0f8a8be0-04e6-4ef8-b3ce-fb6ced595cbd\") " pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7xcjk" Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.776456 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-7d695c9b56-7xcjk"] Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.786475 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-68b95954c9-k2htr"] Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.788479 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-k2htr" Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.794915 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-4zjb9" Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.833788 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-774b86978c-w8ftq"] Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.839656 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-68b95954c9-k2htr"] Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.839795 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-774b86978c-w8ftq" Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.852891 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c9694994-m2qxd"] Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.854662 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-m2qxd" Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.860458 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-pgzgx" Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.863913 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-rdrph" Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.866016 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-774b86978c-w8ftq"] Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.868628 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z2gxl\" (UniqueName: \"kubernetes.io/projected/67abea47-5e8a-43a2-8865-929cfdfc607c-kube-api-access-z2gxl\") pod \"heat-operator-controller-manager-774b86978c-w8ftq\" (UID: \"67abea47-5e8a-43a2-8865-929cfdfc607c\") " pod="openstack-operators/heat-operator-controller-manager-774b86978c-w8ftq" Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.868743 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vdjxf\" (UniqueName: \"kubernetes.io/projected/aaa65e3e-75e2-4f50-b9d6-aa9710a6e394-kube-api-access-vdjxf\") pod \"cinder-operator-controller-manager-79856dc55c-2hp9n\" (UID: \"aaa65e3e-75e2-4f50-b9d6-aa9710a6e394\") " pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-2hp9n" Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.868865 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lv6fp\" (UniqueName: \"kubernetes.io/projected/4894deb0-65ca-4b42-b397-4092a75739c9-kube-api-access-lv6fp\") pod \"glance-operator-controller-manager-68b95954c9-k2htr\" (UID: \"4894deb0-65ca-4b42-b397-4092a75739c9\") " pod="openstack-operators/glance-operator-controller-manager-68b95954c9-k2htr" Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.869001 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dng96\" (UniqueName: \"kubernetes.io/projected/1da948d8-e834-488a-a3ec-a0c0229ebaf5-kube-api-access-dng96\") pod \"horizon-operator-controller-manager-68c9694994-m2qxd\" (UID: \"1da948d8-e834-488a-a3ec-a0c0229ebaf5\") " pod="openstack-operators/horizon-operator-controller-manager-68c9694994-m2qxd" Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.869132 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xhsxn\" (UniqueName: \"kubernetes.io/projected/b90cc789-8211-48bc-85cc-1a31ad1af486-kube-api-access-xhsxn\") pod \"barbican-operator-controller-manager-86dc4d89c8-zv5xr\" (UID: \"b90cc789-8211-48bc-85cc-1a31ad1af486\") " pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-zv5xr" Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.869244 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8b2sq\" (UniqueName: \"kubernetes.io/projected/0f8a8be0-04e6-4ef8-b3ce-fb6ced595cbd-kube-api-access-8b2sq\") pod \"designate-operator-controller-manager-7d695c9b56-7xcjk\" (UID: \"0f8a8be0-04e6-4ef8-b3ce-fb6ced595cbd\") " pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7xcjk" Nov 25 10:02:54 crc 
kubenswrapper[4769]: I1125 10:02:54.914096 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-d5cc86f4b-nktn6"] Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.915819 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xhsxn\" (UniqueName: \"kubernetes.io/projected/b90cc789-8211-48bc-85cc-1a31ad1af486-kube-api-access-xhsxn\") pod \"barbican-operator-controller-manager-86dc4d89c8-zv5xr\" (UID: \"b90cc789-8211-48bc-85cc-1a31ad1af486\") " pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-zv5xr" Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.915926 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-nktn6" Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.916250 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vdjxf\" (UniqueName: \"kubernetes.io/projected/aaa65e3e-75e2-4f50-b9d6-aa9710a6e394-kube-api-access-vdjxf\") pod \"cinder-operator-controller-manager-79856dc55c-2hp9n\" (UID: \"aaa65e3e-75e2-4f50-b9d6-aa9710a6e394\") " pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-2hp9n" Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.923217 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c9694994-m2qxd"] Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.932825 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8b2sq\" (UniqueName: \"kubernetes.io/projected/0f8a8be0-04e6-4ef8-b3ce-fb6ced595cbd-kube-api-access-8b2sq\") pod \"designate-operator-controller-manager-7d695c9b56-7xcjk\" (UID: \"0f8a8be0-04e6-4ef8-b3ce-fb6ced595cbd\") " pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7xcjk" Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.936353 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-d5cc86f4b-nktn6"] Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.936498 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-g5zsh" Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.936571 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.949100 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5bfcdc958c-47gs9"] Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.950776 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-47gs9" Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.956106 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-gt8nw" Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.971653 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lv6fp\" (UniqueName: \"kubernetes.io/projected/4894deb0-65ca-4b42-b397-4092a75739c9-kube-api-access-lv6fp\") pod \"glance-operator-controller-manager-68b95954c9-k2htr\" (UID: \"4894deb0-65ca-4b42-b397-4092a75739c9\") " pod="openstack-operators/glance-operator-controller-manager-68b95954c9-k2htr" Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.971733 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/136f8f90-5673-4a08-ab4b-c030c1c428a6-cert\") pod \"infra-operator-controller-manager-d5cc86f4b-nktn6\" (UID: \"136f8f90-5673-4a08-ab4b-c030c1c428a6\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-nktn6" Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.971770 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vwt67\" (UniqueName: \"kubernetes.io/projected/b59cac8b-fb36-4316-ab83-da7202b67af5-kube-api-access-vwt67\") pod \"ironic-operator-controller-manager-5bfcdc958c-47gs9\" (UID: \"b59cac8b-fb36-4316-ab83-da7202b67af5\") " pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-47gs9" Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.971795 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dng96\" (UniqueName: \"kubernetes.io/projected/1da948d8-e834-488a-a3ec-a0c0229ebaf5-kube-api-access-dng96\") pod \"horizon-operator-controller-manager-68c9694994-m2qxd\" (UID: \"1da948d8-e834-488a-a3ec-a0c0229ebaf5\") " pod="openstack-operators/horizon-operator-controller-manager-68c9694994-m2qxd" Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.971836 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6qp7m\" (UniqueName: \"kubernetes.io/projected/136f8f90-5673-4a08-ab4b-c030c1c428a6-kube-api-access-6qp7m\") pod \"infra-operator-controller-manager-d5cc86f4b-nktn6\" (UID: \"136f8f90-5673-4a08-ab4b-c030c1c428a6\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-nktn6" Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.971913 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z2gxl\" (UniqueName: \"kubernetes.io/projected/67abea47-5e8a-43a2-8865-929cfdfc607c-kube-api-access-z2gxl\") pod \"heat-operator-controller-manager-774b86978c-w8ftq\" (UID: \"67abea47-5e8a-43a2-8865-929cfdfc607c\") " pod="openstack-operators/heat-operator-controller-manager-774b86978c-w8ftq" Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.991056 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-748dc6576f-hh9nc"] Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.992799 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-hh9nc" Nov 25 10:02:54 crc kubenswrapper[4769]: I1125 10:02:54.999027 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-zv5xr" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.005834 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5bfcdc958c-47gs9"] Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.007633 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-l4lh7" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.008179 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-2hp9n" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.029734 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lv6fp\" (UniqueName: \"kubernetes.io/projected/4894deb0-65ca-4b42-b397-4092a75739c9-kube-api-access-lv6fp\") pod \"glance-operator-controller-manager-68b95954c9-k2htr\" (UID: \"4894deb0-65ca-4b42-b397-4092a75739c9\") " pod="openstack-operators/glance-operator-controller-manager-68b95954c9-k2htr" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.029760 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z2gxl\" (UniqueName: \"kubernetes.io/projected/67abea47-5e8a-43a2-8865-929cfdfc607c-kube-api-access-z2gxl\") pod \"heat-operator-controller-manager-774b86978c-w8ftq\" (UID: \"67abea47-5e8a-43a2-8865-929cfdfc607c\") " pod="openstack-operators/heat-operator-controller-manager-774b86978c-w8ftq" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.035140 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-748dc6576f-hh9nc"] Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.078883 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dng96\" (UniqueName: \"kubernetes.io/projected/1da948d8-e834-488a-a3ec-a0c0229ebaf5-kube-api-access-dng96\") pod \"horizon-operator-controller-manager-68c9694994-m2qxd\" (UID: \"1da948d8-e834-488a-a3ec-a0c0229ebaf5\") " pod="openstack-operators/horizon-operator-controller-manager-68c9694994-m2qxd" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.078985 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-58bb8d67cc-qdjsd"] Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.082469 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-qdjsd" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.083039 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7xcjk" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.093928 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-p726j" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.105151 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lslmw\" (UniqueName: \"kubernetes.io/projected/1f5735dc-67e5-423c-9a8f-d42977c892d3-kube-api-access-lslmw\") pod \"keystone-operator-controller-manager-748dc6576f-hh9nc\" (UID: \"1f5735dc-67e5-423c-9a8f-d42977c892d3\") " pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-hh9nc" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.105211 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/136f8f90-5673-4a08-ab4b-c030c1c428a6-cert\") pod \"infra-operator-controller-manager-d5cc86f4b-nktn6\" (UID: \"136f8f90-5673-4a08-ab4b-c030c1c428a6\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-nktn6" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.105248 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vwt67\" (UniqueName: \"kubernetes.io/projected/b59cac8b-fb36-4316-ab83-da7202b67af5-kube-api-access-vwt67\") pod \"ironic-operator-controller-manager-5bfcdc958c-47gs9\" (UID: \"b59cac8b-fb36-4316-ab83-da7202b67af5\") " pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-47gs9" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.105284 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5wkcf\" (UniqueName: \"kubernetes.io/projected/63e66921-47a7-407a-b50e-06cf5cadb8be-kube-api-access-5wkcf\") pod \"manila-operator-controller-manager-58bb8d67cc-qdjsd\" (UID: \"63e66921-47a7-407a-b50e-06cf5cadb8be\") " pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-qdjsd" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.105322 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6qp7m\" (UniqueName: \"kubernetes.io/projected/136f8f90-5673-4a08-ab4b-c030c1c428a6-kube-api-access-6qp7m\") pod \"infra-operator-controller-manager-d5cc86f4b-nktn6\" (UID: \"136f8f90-5673-4a08-ab4b-c030c1c428a6\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-nktn6" Nov 25 10:02:55 crc kubenswrapper[4769]: E1125 10:02:55.105826 4769 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 25 10:02:55 crc kubenswrapper[4769]: E1125 10:02:55.105877 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/136f8f90-5673-4a08-ab4b-c030c1c428a6-cert podName:136f8f90-5673-4a08-ab4b-c030c1c428a6 nodeName:}" failed. No retries permitted until 2025-11-25 10:02:55.605856282 +0000 UTC m=+1124.190828595 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/136f8f90-5673-4a08-ab4b-c030c1c428a6-cert") pod "infra-operator-controller-manager-d5cc86f4b-nktn6" (UID: "136f8f90-5673-4a08-ab4b-c030c1c428a6") : secret "infra-operator-webhook-server-cert" not found Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.107911 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-58bb8d67cc-qdjsd"] Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.116086 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-fknhz"] Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.117886 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-fknhz" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.123735 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-rgn2z"] Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.126452 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-rgn2z" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.129484 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-cxvnb" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.130300 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-wx5bm" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.130836 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-k2htr" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.149548 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6qp7m\" (UniqueName: \"kubernetes.io/projected/136f8f90-5673-4a08-ab4b-c030c1c428a6-kube-api-access-6qp7m\") pod \"infra-operator-controller-manager-d5cc86f4b-nktn6\" (UID: \"136f8f90-5673-4a08-ab4b-c030c1c428a6\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-nktn6" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.189694 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-774b86978c-w8ftq" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.199570 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vwt67\" (UniqueName: \"kubernetes.io/projected/b59cac8b-fb36-4316-ab83-da7202b67af5-kube-api-access-vwt67\") pod \"ironic-operator-controller-manager-5bfcdc958c-47gs9\" (UID: \"b59cac8b-fb36-4316-ab83-da7202b67af5\") " pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-47gs9" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.218159 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9glvj\" (UniqueName: \"kubernetes.io/projected/9d0ef7c9-7421-4fe2-b1c8-551253bea174-kube-api-access-9glvj\") pod \"mariadb-operator-controller-manager-cb6c4fdb7-fknhz\" (UID: \"9d0ef7c9-7421-4fe2-b1c8-551253bea174\") " pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-fknhz" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.218283 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lslmw\" (UniqueName: \"kubernetes.io/projected/1f5735dc-67e5-423c-9a8f-d42977c892d3-kube-api-access-lslmw\") pod \"keystone-operator-controller-manager-748dc6576f-hh9nc\" (UID: \"1f5735dc-67e5-423c-9a8f-d42977c892d3\") " pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-hh9nc" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.218310 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6fxg9\" (UniqueName: \"kubernetes.io/projected/6d9be953-34ea-4956-96bf-84d5f8babb2d-kube-api-access-6fxg9\") pod \"neutron-operator-controller-manager-7c57c8bbc4-rgn2z\" (UID: \"6d9be953-34ea-4956-96bf-84d5f8babb2d\") " pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-rgn2z" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.218386 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5wkcf\" (UniqueName: \"kubernetes.io/projected/63e66921-47a7-407a-b50e-06cf5cadb8be-kube-api-access-5wkcf\") pod \"manila-operator-controller-manager-58bb8d67cc-qdjsd\" (UID: \"63e66921-47a7-407a-b50e-06cf5cadb8be\") " pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-qdjsd" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.231551 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-m2qxd" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.256046 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5wkcf\" (UniqueName: \"kubernetes.io/projected/63e66921-47a7-407a-b50e-06cf5cadb8be-kube-api-access-5wkcf\") pod \"manila-operator-controller-manager-58bb8d67cc-qdjsd\" (UID: \"63e66921-47a7-407a-b50e-06cf5cadb8be\") " pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-qdjsd" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.273107 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-rcv4n"] Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.290706 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lslmw\" (UniqueName: \"kubernetes.io/projected/1f5735dc-67e5-423c-9a8f-d42977c892d3-kube-api-access-lslmw\") pod \"keystone-operator-controller-manager-748dc6576f-hh9nc\" (UID: \"1f5735dc-67e5-423c-9a8f-d42977c892d3\") " pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-hh9nc" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.352374 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-fknhz"] Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.352517 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rcv4n" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.355767 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9glvj\" (UniqueName: \"kubernetes.io/projected/9d0ef7c9-7421-4fe2-b1c8-551253bea174-kube-api-access-9glvj\") pod \"mariadb-operator-controller-manager-cb6c4fdb7-fknhz\" (UID: \"9d0ef7c9-7421-4fe2-b1c8-551253bea174\") " pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-fknhz" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.356037 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6fxg9\" (UniqueName: \"kubernetes.io/projected/6d9be953-34ea-4956-96bf-84d5f8babb2d-kube-api-access-6fxg9\") pod \"neutron-operator-controller-manager-7c57c8bbc4-rgn2z\" (UID: \"6d9be953-34ea-4956-96bf-84d5f8babb2d\") " pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-rgn2z" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.376085 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-rcv4n"] Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.383491 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-cjscj" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.412645 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-rgn2z"] Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.419247 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9glvj\" (UniqueName: \"kubernetes.io/projected/9d0ef7c9-7421-4fe2-b1c8-551253bea174-kube-api-access-9glvj\") pod \"mariadb-operator-controller-manager-cb6c4fdb7-fknhz\" (UID: \"9d0ef7c9-7421-4fe2-b1c8-551253bea174\") " 
pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-fknhz" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.422774 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6fxg9\" (UniqueName: \"kubernetes.io/projected/6d9be953-34ea-4956-96bf-84d5f8babb2d-kube-api-access-6fxg9\") pod \"neutron-operator-controller-manager-7c57c8bbc4-rgn2z\" (UID: \"6d9be953-34ea-4956-96bf-84d5f8babb2d\") " pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-rgn2z" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.425943 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-fd75fd47d-kbhzd"] Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.427959 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-kbhzd" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.448192 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-fd75fd47d-kbhzd"] Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.456548 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-pxvh7"] Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.457914 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kx4gs\" (UniqueName: \"kubernetes.io/projected/1b8cd25d-43dd-4774-b1d9-59572bb6bef7-kube-api-access-kx4gs\") pod \"nova-operator-controller-manager-79556f57fc-rcv4n\" (UID: \"1b8cd25d-43dd-4774-b1d9-59572bb6bef7\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rcv4n" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.458488 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-pxvh7" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.461172 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-47gs9" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.461429 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-5jpzm" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.465364 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-79t9w" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.468635 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.471121 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-66cf5c67ff-68bvf"] Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.472827 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-68bvf" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.482348 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-6n7lc" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.498042 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-pxvh7"] Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.501576 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-hh9nc" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.551057 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-5db546f9d9-g8xqz"] Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.553065 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-g8xqz" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.554912 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-qdjsd" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.556873 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-kpnqf" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.559245 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kx4gs\" (UniqueName: \"kubernetes.io/projected/1b8cd25d-43dd-4774-b1d9-59572bb6bef7-kube-api-access-kx4gs\") pod \"nova-operator-controller-manager-79556f57fc-rcv4n\" (UID: \"1b8cd25d-43dd-4774-b1d9-59572bb6bef7\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rcv4n" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.559601 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-md4pg\" (UniqueName: \"kubernetes.io/projected/169b77e8-e7b0-4d11-9915-2442f48d9347-kube-api-access-md4pg\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-pxvh7\" (UID: \"169b77e8-e7b0-4d11-9915-2442f48d9347\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-pxvh7" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.559709 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/169b77e8-e7b0-4d11-9915-2442f48d9347-cert\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-pxvh7\" (UID: \"169b77e8-e7b0-4d11-9915-2442f48d9347\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-pxvh7" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.559763 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fp7b7\" (UniqueName: \"kubernetes.io/projected/88cb8ad7-c855-45eb-a471-aacb8c42082c-kube-api-access-fp7b7\") pod \"octavia-operator-controller-manager-fd75fd47d-kbhzd\" (UID: \"88cb8ad7-c855-45eb-a471-aacb8c42082c\") " pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-kbhzd" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.559785 4769 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h8g74\" (UniqueName: \"kubernetes.io/projected/a2f1ad69-27e4-4131-a742-a8d2c5df8636-kube-api-access-h8g74\") pod \"ovn-operator-controller-manager-66cf5c67ff-68bvf\" (UID: \"a2f1ad69-27e4-4131-a742-a8d2c5df8636\") " pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-68bvf" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.596182 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-66cf5c67ff-68bvf"] Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.601759 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-fknhz" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.606054 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kx4gs\" (UniqueName: \"kubernetes.io/projected/1b8cd25d-43dd-4774-b1d9-59572bb6bef7-kube-api-access-kx4gs\") pod \"nova-operator-controller-manager-79556f57fc-rcv4n\" (UID: \"1b8cd25d-43dd-4774-b1d9-59572bb6bef7\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rcv4n" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.618080 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5db546f9d9-g8xqz"] Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.663456 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/136f8f90-5673-4a08-ab4b-c030c1c428a6-cert\") pod \"infra-operator-controller-manager-d5cc86f4b-nktn6\" (UID: \"136f8f90-5673-4a08-ab4b-c030c1c428a6\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-nktn6" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.663516 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-md4pg\" (UniqueName: \"kubernetes.io/projected/169b77e8-e7b0-4d11-9915-2442f48d9347-kube-api-access-md4pg\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-pxvh7\" (UID: \"169b77e8-e7b0-4d11-9915-2442f48d9347\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-pxvh7" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.663549 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/169b77e8-e7b0-4d11-9915-2442f48d9347-cert\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-pxvh7\" (UID: \"169b77e8-e7b0-4d11-9915-2442f48d9347\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-pxvh7" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.663581 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fp7b7\" (UniqueName: \"kubernetes.io/projected/88cb8ad7-c855-45eb-a471-aacb8c42082c-kube-api-access-fp7b7\") pod \"octavia-operator-controller-manager-fd75fd47d-kbhzd\" (UID: \"88cb8ad7-c855-45eb-a471-aacb8c42082c\") " pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-kbhzd" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.663609 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h8g74\" (UniqueName: \"kubernetes.io/projected/a2f1ad69-27e4-4131-a742-a8d2c5df8636-kube-api-access-h8g74\") pod 
\"ovn-operator-controller-manager-66cf5c67ff-68bvf\" (UID: \"a2f1ad69-27e4-4131-a742-a8d2c5df8636\") " pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-68bvf" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.663639 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-llppr\" (UniqueName: \"kubernetes.io/projected/1be22f03-8697-413b-922c-9344185c05c4-kube-api-access-llppr\") pod \"placement-operator-controller-manager-5db546f9d9-g8xqz\" (UID: \"1be22f03-8697-413b-922c-9344185c05c4\") " pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-g8xqz" Nov 25 10:02:55 crc kubenswrapper[4769]: E1125 10:02:55.663808 4769 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 25 10:02:55 crc kubenswrapper[4769]: E1125 10:02:55.663859 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/136f8f90-5673-4a08-ab4b-c030c1c428a6-cert podName:136f8f90-5673-4a08-ab4b-c030c1c428a6 nodeName:}" failed. No retries permitted until 2025-11-25 10:02:56.663840662 +0000 UTC m=+1125.248812975 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/136f8f90-5673-4a08-ab4b-c030c1c428a6-cert") pod "infra-operator-controller-manager-d5cc86f4b-nktn6" (UID: "136f8f90-5673-4a08-ab4b-c030c1c428a6") : secret "infra-operator-webhook-server-cert" not found Nov 25 10:02:55 crc kubenswrapper[4769]: E1125 10:02:55.664472 4769 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 10:02:55 crc kubenswrapper[4769]: E1125 10:02:55.664502 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/169b77e8-e7b0-4d11-9915-2442f48d9347-cert podName:169b77e8-e7b0-4d11-9915-2442f48d9347 nodeName:}" failed. No retries permitted until 2025-11-25 10:02:56.164494 +0000 UTC m=+1124.749466313 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/169b77e8-e7b0-4d11-9915-2442f48d9347-cert") pod "openstack-baremetal-operator-controller-manager-544b9bb9-pxvh7" (UID: "169b77e8-e7b0-4d11-9915-2442f48d9347") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.689262 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wssdn"] Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.691307 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wssdn" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.693127 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-rgn2z" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.700375 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-trs84" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.706467 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h8g74\" (UniqueName: \"kubernetes.io/projected/a2f1ad69-27e4-4131-a742-a8d2c5df8636-kube-api-access-h8g74\") pod \"ovn-operator-controller-manager-66cf5c67ff-68bvf\" (UID: \"a2f1ad69-27e4-4131-a742-a8d2c5df8636\") " pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-68bvf" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.714382 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wssdn"] Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.726434 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fp7b7\" (UniqueName: \"kubernetes.io/projected/88cb8ad7-c855-45eb-a471-aacb8c42082c-kube-api-access-fp7b7\") pod \"octavia-operator-controller-manager-fd75fd47d-kbhzd\" (UID: \"88cb8ad7-c855-45eb-a471-aacb8c42082c\") " pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-kbhzd" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.727100 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rcv4n" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.739011 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-md4pg\" (UniqueName: \"kubernetes.io/projected/169b77e8-e7b0-4d11-9915-2442f48d9347-kube-api-access-md4pg\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-pxvh7\" (UID: \"169b77e8-e7b0-4d11-9915-2442f48d9347\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-pxvh7" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.766443 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tmdxn\" (UniqueName: \"kubernetes.io/projected/18e1910e-52b2-439b-a93f-4ffe63a7b992-kube-api-access-tmdxn\") pod \"swift-operator-controller-manager-6fdc4fcf86-wssdn\" (UID: \"18e1910e-52b2-439b-a93f-4ffe63a7b992\") " pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wssdn" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.766939 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-llppr\" (UniqueName: \"kubernetes.io/projected/1be22f03-8697-413b-922c-9344185c05c4-kube-api-access-llppr\") pod \"placement-operator-controller-manager-5db546f9d9-g8xqz\" (UID: \"1be22f03-8697-413b-922c-9344185c05c4\") " pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-g8xqz" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.799947 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-kbhzd" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.801362 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-54cf759cb9-dcqfc"] Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.803180 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-54cf759cb9-dcqfc" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.813423 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-shlvx" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.825191 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-5cb74df96-b25q9"] Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.826725 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cb74df96-b25q9" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.848268 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-xngcz" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.849327 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-llppr\" (UniqueName: \"kubernetes.io/projected/1be22f03-8697-413b-922c-9344185c05c4-kube-api-access-llppr\") pod \"placement-operator-controller-manager-5db546f9d9-g8xqz\" (UID: \"1be22f03-8697-413b-922c-9344185c05c4\") " pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-g8xqz" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.867886 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-68bvf" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.868599 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tmdxn\" (UniqueName: \"kubernetes.io/projected/18e1910e-52b2-439b-a93f-4ffe63a7b992-kube-api-access-tmdxn\") pod \"swift-operator-controller-manager-6fdc4fcf86-wssdn\" (UID: \"18e1910e-52b2-439b-a93f-4ffe63a7b992\") " pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wssdn" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.868780 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jx6ql\" (UniqueName: \"kubernetes.io/projected/c1d16c0c-cca9-4794-8a52-c8674d9a069e-kube-api-access-jx6ql\") pod \"telemetry-operator-controller-manager-54cf759cb9-dcqfc\" (UID: \"c1d16c0c-cca9-4794-8a52-c8674d9a069e\") " pod="openstack-operators/telemetry-operator-controller-manager-54cf759cb9-dcqfc" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.895226 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-54cf759cb9-dcqfc"] Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.920986 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-g8xqz" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.938260 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5cb74df96-b25q9"] Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.948071 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tmdxn\" (UniqueName: \"kubernetes.io/projected/18e1910e-52b2-439b-a93f-4ffe63a7b992-kube-api-access-tmdxn\") pod \"swift-operator-controller-manager-6fdc4fcf86-wssdn\" (UID: \"18e1910e-52b2-439b-a93f-4ffe63a7b992\") " pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wssdn" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.971167 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-864885998-rz7fl"] Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.972422 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jx6ql\" (UniqueName: \"kubernetes.io/projected/c1d16c0c-cca9-4794-8a52-c8674d9a069e-kube-api-access-jx6ql\") pod \"telemetry-operator-controller-manager-54cf759cb9-dcqfc\" (UID: \"c1d16c0c-cca9-4794-8a52-c8674d9a069e\") " pod="openstack-operators/telemetry-operator-controller-manager-54cf759cb9-dcqfc" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.972609 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fxwd5\" (UniqueName: \"kubernetes.io/projected/c21f4ff5-86fa-44f0-993f-59189de57182-kube-api-access-fxwd5\") pod \"test-operator-controller-manager-5cb74df96-b25q9\" (UID: \"c21f4ff5-86fa-44f0-993f-59189de57182\") " pod="openstack-operators/test-operator-controller-manager-5cb74df96-b25q9" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.972995 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-864885998-rz7fl" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.987452 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-c4qrf" Nov 25 10:02:55 crc kubenswrapper[4769]: I1125 10:02:55.997893 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-864885998-rz7fl"] Nov 25 10:02:56 crc kubenswrapper[4769]: I1125 10:02:56.027503 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jx6ql\" (UniqueName: \"kubernetes.io/projected/c1d16c0c-cca9-4794-8a52-c8674d9a069e-kube-api-access-jx6ql\") pod \"telemetry-operator-controller-manager-54cf759cb9-dcqfc\" (UID: \"c1d16c0c-cca9-4794-8a52-c8674d9a069e\") " pod="openstack-operators/telemetry-operator-controller-manager-54cf759cb9-dcqfc" Nov 25 10:02:56 crc kubenswrapper[4769]: I1125 10:02:56.046528 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wssdn" Nov 25 10:02:56 crc kubenswrapper[4769]: I1125 10:02:56.049044 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-6c6f9bd7cc-cp48g"] Nov 25 10:02:56 crc kubenswrapper[4769]: I1125 10:02:56.069102 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-6c6f9bd7cc-cp48g" Nov 25 10:02:56 crc kubenswrapper[4769]: I1125 10:02:56.076335 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Nov 25 10:02:56 crc kubenswrapper[4769]: I1125 10:02:56.076831 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-gqg8c" Nov 25 10:02:56 crc kubenswrapper[4769]: I1125 10:02:56.076952 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Nov 25 10:02:56 crc kubenswrapper[4769]: I1125 10:02:56.078029 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fxwd5\" (UniqueName: \"kubernetes.io/projected/c21f4ff5-86fa-44f0-993f-59189de57182-kube-api-access-fxwd5\") pod \"test-operator-controller-manager-5cb74df96-b25q9\" (UID: \"c21f4ff5-86fa-44f0-993f-59189de57182\") " pod="openstack-operators/test-operator-controller-manager-5cb74df96-b25q9" Nov 25 10:02:56 crc kubenswrapper[4769]: I1125 10:02:56.081049 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6zb9p\" (UniqueName: \"kubernetes.io/projected/8e7436d0-2ff7-4a11-9ab8-74a91e56de4a-kube-api-access-6zb9p\") pod \"watcher-operator-controller-manager-864885998-rz7fl\" (UID: \"8e7436d0-2ff7-4a11-9ab8-74a91e56de4a\") " pod="openstack-operators/watcher-operator-controller-manager-864885998-rz7fl" Nov 25 10:02:56 crc kubenswrapper[4769]: I1125 10:02:56.112785 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-6c6f9bd7cc-cp48g"] Nov 25 10:02:56 crc kubenswrapper[4769]: I1125 10:02:56.128822 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fxwd5\" (UniqueName: \"kubernetes.io/projected/c21f4ff5-86fa-44f0-993f-59189de57182-kube-api-access-fxwd5\") pod \"test-operator-controller-manager-5cb74df96-b25q9\" (UID: \"c21f4ff5-86fa-44f0-993f-59189de57182\") " pod="openstack-operators/test-operator-controller-manager-5cb74df96-b25q9" Nov 25 10:02:56 crc kubenswrapper[4769]: I1125 10:02:56.148944 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-86dc4d89c8-zv5xr"] Nov 25 10:02:56 crc kubenswrapper[4769]: I1125 10:02:56.187100 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-54cf759cb9-dcqfc" Nov 25 10:02:56 crc kubenswrapper[4769]: I1125 10:02:56.190053 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/169b77e8-e7b0-4d11-9915-2442f48d9347-cert\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-pxvh7\" (UID: \"169b77e8-e7b0-4d11-9915-2442f48d9347\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-pxvh7" Nov 25 10:02:56 crc kubenswrapper[4769]: I1125 10:02:56.190115 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6zb9p\" (UniqueName: \"kubernetes.io/projected/8e7436d0-2ff7-4a11-9ab8-74a91e56de4a-kube-api-access-6zb9p\") pod \"watcher-operator-controller-manager-864885998-rz7fl\" (UID: \"8e7436d0-2ff7-4a11-9ab8-74a91e56de4a\") " pod="openstack-operators/watcher-operator-controller-manager-864885998-rz7fl" Nov 25 10:02:56 crc kubenswrapper[4769]: I1125 10:02:56.190156 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/70f81d0a-db58-4bd4-a0e2-ee1c03e2f923-metrics-certs\") pod \"openstack-operator-controller-manager-6c6f9bd7cc-cp48g\" (UID: \"70f81d0a-db58-4bd4-a0e2-ee1c03e2f923\") " pod="openstack-operators/openstack-operator-controller-manager-6c6f9bd7cc-cp48g" Nov 25 10:02:56 crc kubenswrapper[4769]: I1125 10:02:56.190206 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jctld\" (UniqueName: \"kubernetes.io/projected/70f81d0a-db58-4bd4-a0e2-ee1c03e2f923-kube-api-access-jctld\") pod \"openstack-operator-controller-manager-6c6f9bd7cc-cp48g\" (UID: \"70f81d0a-db58-4bd4-a0e2-ee1c03e2f923\") " pod="openstack-operators/openstack-operator-controller-manager-6c6f9bd7cc-cp48g" Nov 25 10:02:56 crc kubenswrapper[4769]: I1125 10:02:56.190271 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/70f81d0a-db58-4bd4-a0e2-ee1c03e2f923-webhook-certs\") pod \"openstack-operator-controller-manager-6c6f9bd7cc-cp48g\" (UID: \"70f81d0a-db58-4bd4-a0e2-ee1c03e2f923\") " pod="openstack-operators/openstack-operator-controller-manager-6c6f9bd7cc-cp48g" Nov 25 10:02:56 crc kubenswrapper[4769]: E1125 10:02:56.191907 4769 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 10:02:56 crc kubenswrapper[4769]: E1125 10:02:56.200090 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/169b77e8-e7b0-4d11-9915-2442f48d9347-cert podName:169b77e8-e7b0-4d11-9915-2442f48d9347 nodeName:}" failed. No retries permitted until 2025-11-25 10:02:57.199998047 +0000 UTC m=+1125.784970360 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/169b77e8-e7b0-4d11-9915-2442f48d9347-cert") pod "openstack-baremetal-operator-controller-manager-544b9bb9-pxvh7" (UID: "169b77e8-e7b0-4d11-9915-2442f48d9347") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 10:02:56 crc kubenswrapper[4769]: I1125 10:02:56.240380 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6zb9p\" (UniqueName: \"kubernetes.io/projected/8e7436d0-2ff7-4a11-9ab8-74a91e56de4a-kube-api-access-6zb9p\") pod \"watcher-operator-controller-manager-864885998-rz7fl\" (UID: \"8e7436d0-2ff7-4a11-9ab8-74a91e56de4a\") " pod="openstack-operators/watcher-operator-controller-manager-864885998-rz7fl" Nov 25 10:02:56 crc kubenswrapper[4769]: I1125 10:02:56.291114 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jctld\" (UniqueName: \"kubernetes.io/projected/70f81d0a-db58-4bd4-a0e2-ee1c03e2f923-kube-api-access-jctld\") pod \"openstack-operator-controller-manager-6c6f9bd7cc-cp48g\" (UID: \"70f81d0a-db58-4bd4-a0e2-ee1c03e2f923\") " pod="openstack-operators/openstack-operator-controller-manager-6c6f9bd7cc-cp48g" Nov 25 10:02:56 crc kubenswrapper[4769]: I1125 10:02:56.291206 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/70f81d0a-db58-4bd4-a0e2-ee1c03e2f923-webhook-certs\") pod \"openstack-operator-controller-manager-6c6f9bd7cc-cp48g\" (UID: \"70f81d0a-db58-4bd4-a0e2-ee1c03e2f923\") " pod="openstack-operators/openstack-operator-controller-manager-6c6f9bd7cc-cp48g" Nov 25 10:02:56 crc kubenswrapper[4769]: I1125 10:02:56.291348 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/70f81d0a-db58-4bd4-a0e2-ee1c03e2f923-metrics-certs\") pod \"openstack-operator-controller-manager-6c6f9bd7cc-cp48g\" (UID: \"70f81d0a-db58-4bd4-a0e2-ee1c03e2f923\") " pod="openstack-operators/openstack-operator-controller-manager-6c6f9bd7cc-cp48g" Nov 25 10:02:56 crc kubenswrapper[4769]: E1125 10:02:56.291497 4769 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 25 10:02:56 crc kubenswrapper[4769]: E1125 10:02:56.291563 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/70f81d0a-db58-4bd4-a0e2-ee1c03e2f923-metrics-certs podName:70f81d0a-db58-4bd4-a0e2-ee1c03e2f923 nodeName:}" failed. No retries permitted until 2025-11-25 10:02:56.791543205 +0000 UTC m=+1125.376515518 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/70f81d0a-db58-4bd4-a0e2-ee1c03e2f923-metrics-certs") pod "openstack-operator-controller-manager-6c6f9bd7cc-cp48g" (UID: "70f81d0a-db58-4bd4-a0e2-ee1c03e2f923") : secret "metrics-server-cert" not found Nov 25 10:02:56 crc kubenswrapper[4769]: E1125 10:02:56.291856 4769 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 25 10:02:56 crc kubenswrapper[4769]: E1125 10:02:56.291929 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/70f81d0a-db58-4bd4-a0e2-ee1c03e2f923-webhook-certs podName:70f81d0a-db58-4bd4-a0e2-ee1c03e2f923 nodeName:}" failed. No retries permitted until 2025-11-25 10:02:56.791905965 +0000 UTC m=+1125.376878278 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/70f81d0a-db58-4bd4-a0e2-ee1c03e2f923-webhook-certs") pod "openstack-operator-controller-manager-6c6f9bd7cc-cp48g" (UID: "70f81d0a-db58-4bd4-a0e2-ee1c03e2f923") : secret "webhook-server-cert" not found Nov 25 10:02:56 crc kubenswrapper[4769]: I1125 10:02:56.320934 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jctld\" (UniqueName: \"kubernetes.io/projected/70f81d0a-db58-4bd4-a0e2-ee1c03e2f923-kube-api-access-jctld\") pod \"openstack-operator-controller-manager-6c6f9bd7cc-cp48g\" (UID: \"70f81d0a-db58-4bd4-a0e2-ee1c03e2f923\") " pod="openstack-operators/openstack-operator-controller-manager-6c6f9bd7cc-cp48g" Nov 25 10:02:56 crc kubenswrapper[4769]: I1125 10:02:56.326998 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-bzrbr"] Nov 25 10:02:56 crc kubenswrapper[4769]: I1125 10:02:56.330173 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-bzrbr"] Nov 25 10:02:56 crc kubenswrapper[4769]: I1125 10:02:56.330373 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-bzrbr" Nov 25 10:02:56 crc kubenswrapper[4769]: I1125 10:02:56.336125 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-864885998-rz7fl" Nov 25 10:02:56 crc kubenswrapper[4769]: I1125 10:02:56.336481 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-6z7lp" Nov 25 10:02:56 crc kubenswrapper[4769]: I1125 10:02:56.380584 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cb74df96-b25q9" Nov 25 10:02:56 crc kubenswrapper[4769]: I1125 10:02:56.503406 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2jdbr\" (UniqueName: \"kubernetes.io/projected/a032414e-4be2-47f7-ac88-3bdec0ccb151-kube-api-access-2jdbr\") pod \"rabbitmq-cluster-operator-manager-668c99d594-bzrbr\" (UID: \"a032414e-4be2-47f7-ac88-3bdec0ccb151\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-bzrbr" Nov 25 10:02:56 crc kubenswrapper[4769]: I1125 10:02:56.608567 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2jdbr\" (UniqueName: \"kubernetes.io/projected/a032414e-4be2-47f7-ac88-3bdec0ccb151-kube-api-access-2jdbr\") pod \"rabbitmq-cluster-operator-manager-668c99d594-bzrbr\" (UID: \"a032414e-4be2-47f7-ac88-3bdec0ccb151\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-bzrbr" Nov 25 10:02:56 crc kubenswrapper[4769]: I1125 10:02:56.643559 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2jdbr\" (UniqueName: \"kubernetes.io/projected/a032414e-4be2-47f7-ac88-3bdec0ccb151-kube-api-access-2jdbr\") pod \"rabbitmq-cluster-operator-manager-668c99d594-bzrbr\" (UID: \"a032414e-4be2-47f7-ac88-3bdec0ccb151\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-bzrbr" Nov 25 10:02:56 crc kubenswrapper[4769]: I1125 10:02:56.654593 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-68b95954c9-k2htr"] Nov 25 10:02:56 crc kubenswrapper[4769]: I1125 10:02:56.666778 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-bzrbr" Nov 25 10:02:56 crc kubenswrapper[4769]: I1125 10:02:56.711234 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/136f8f90-5673-4a08-ab4b-c030c1c428a6-cert\") pod \"infra-operator-controller-manager-d5cc86f4b-nktn6\" (UID: \"136f8f90-5673-4a08-ab4b-c030c1c428a6\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-nktn6" Nov 25 10:02:56 crc kubenswrapper[4769]: I1125 10:02:56.717837 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/136f8f90-5673-4a08-ab4b-c030c1c428a6-cert\") pod \"infra-operator-controller-manager-d5cc86f4b-nktn6\" (UID: \"136f8f90-5673-4a08-ab4b-c030c1c428a6\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-nktn6" Nov 25 10:02:56 crc kubenswrapper[4769]: I1125 10:02:56.765511 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-k2htr" event={"ID":"4894deb0-65ca-4b42-b397-4092a75739c9","Type":"ContainerStarted","Data":"6201188e7deb0197a16b8d4c0bdf10c9b3696cbf7925ac4ccd50f363e5a7cfaf"} Nov 25 10:02:56 crc kubenswrapper[4769]: I1125 10:02:56.769884 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-zv5xr" event={"ID":"b90cc789-8211-48bc-85cc-1a31ad1af486","Type":"ContainerStarted","Data":"41e75df04801e654e06e96ce1deb2f428ee26f7df784e5889c14f0985f99bd3d"} Nov 25 10:02:56 crc kubenswrapper[4769]: I1125 10:02:56.813171 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/70f81d0a-db58-4bd4-a0e2-ee1c03e2f923-webhook-certs\") pod \"openstack-operator-controller-manager-6c6f9bd7cc-cp48g\" (UID: \"70f81d0a-db58-4bd4-a0e2-ee1c03e2f923\") " pod="openstack-operators/openstack-operator-controller-manager-6c6f9bd7cc-cp48g" Nov 25 10:02:56 crc kubenswrapper[4769]: I1125 10:02:56.813292 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/70f81d0a-db58-4bd4-a0e2-ee1c03e2f923-metrics-certs\") pod \"openstack-operator-controller-manager-6c6f9bd7cc-cp48g\" (UID: \"70f81d0a-db58-4bd4-a0e2-ee1c03e2f923\") " pod="openstack-operators/openstack-operator-controller-manager-6c6f9bd7cc-cp48g" Nov 25 10:02:56 crc kubenswrapper[4769]: E1125 10:02:56.813475 4769 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 25 10:02:56 crc kubenswrapper[4769]: E1125 10:02:56.813553 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/70f81d0a-db58-4bd4-a0e2-ee1c03e2f923-metrics-certs podName:70f81d0a-db58-4bd4-a0e2-ee1c03e2f923 nodeName:}" failed. No retries permitted until 2025-11-25 10:02:57.813531315 +0000 UTC m=+1126.398503618 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/70f81d0a-db58-4bd4-a0e2-ee1c03e2f923-metrics-certs") pod "openstack-operator-controller-manager-6c6f9bd7cc-cp48g" (UID: "70f81d0a-db58-4bd4-a0e2-ee1c03e2f923") : secret "metrics-server-cert" not found Nov 25 10:02:56 crc kubenswrapper[4769]: E1125 10:02:56.814063 4769 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 25 10:02:56 crc kubenswrapper[4769]: E1125 10:02:56.814091 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/70f81d0a-db58-4bd4-a0e2-ee1c03e2f923-webhook-certs podName:70f81d0a-db58-4bd4-a0e2-ee1c03e2f923 nodeName:}" failed. No retries permitted until 2025-11-25 10:02:57.81408402 +0000 UTC m=+1126.399056333 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/70f81d0a-db58-4bd4-a0e2-ee1c03e2f923-webhook-certs") pod "openstack-operator-controller-manager-6c6f9bd7cc-cp48g" (UID: "70f81d0a-db58-4bd4-a0e2-ee1c03e2f923") : secret "webhook-server-cert" not found Nov 25 10:02:56 crc kubenswrapper[4769]: I1125 10:02:56.815768 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-nktn6" Nov 25 10:02:57 crc kubenswrapper[4769]: I1125 10:02:57.084253 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5bfcdc958c-47gs9"] Nov 25 10:02:57 crc kubenswrapper[4769]: I1125 10:02:57.104683 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-774b86978c-w8ftq"] Nov 25 10:02:57 crc kubenswrapper[4769]: W1125 10:02:57.111488 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb59cac8b_fb36_4316_ab83_da7202b67af5.slice/crio-0989dedd7d767731775de660f3d0395d1787ad56b3bad8f5432b6bab55467f8d WatchSource:0}: Error finding container 0989dedd7d767731775de660f3d0395d1787ad56b3bad8f5432b6bab55467f8d: Status 404 returned error can't find the container with id 0989dedd7d767731775de660f3d0395d1787ad56b3bad8f5432b6bab55467f8d Nov 25 10:02:57 crc kubenswrapper[4769]: I1125 10:02:57.114217 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c9694994-m2qxd"] Nov 25 10:02:57 crc kubenswrapper[4769]: I1125 10:02:57.130363 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-79856dc55c-2hp9n"] Nov 25 10:02:57 crc kubenswrapper[4769]: I1125 10:02:57.157667 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-7d695c9b56-7xcjk"] Nov 25 10:02:57 crc kubenswrapper[4769]: I1125 10:02:57.233490 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/169b77e8-e7b0-4d11-9915-2442f48d9347-cert\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-pxvh7\" (UID: \"169b77e8-e7b0-4d11-9915-2442f48d9347\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-pxvh7" Nov 25 10:02:57 crc kubenswrapper[4769]: E1125 10:02:57.233719 4769 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret 
"openstack-baremetal-operator-webhook-server-cert" not found Nov 25 10:02:57 crc kubenswrapper[4769]: E1125 10:02:57.233875 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/169b77e8-e7b0-4d11-9915-2442f48d9347-cert podName:169b77e8-e7b0-4d11-9915-2442f48d9347 nodeName:}" failed. No retries permitted until 2025-11-25 10:02:59.233856489 +0000 UTC m=+1127.818828802 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/169b77e8-e7b0-4d11-9915-2442f48d9347-cert") pod "openstack-baremetal-operator-controller-manager-544b9bb9-pxvh7" (UID: "169b77e8-e7b0-4d11-9915-2442f48d9347") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 10:02:57 crc kubenswrapper[4769]: I1125 10:02:57.484240 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-fknhz"] Nov 25 10:02:57 crc kubenswrapper[4769]: I1125 10:02:57.501720 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-748dc6576f-hh9nc"] Nov 25 10:02:57 crc kubenswrapper[4769]: I1125 10:02:57.820404 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-fknhz" event={"ID":"9d0ef7c9-7421-4fe2-b1c8-551253bea174","Type":"ContainerStarted","Data":"dbeb7faed40aa5e4dfba814926dd4de7086779a693e171d206f523691ba13018"} Nov 25 10:02:57 crc kubenswrapper[4769]: I1125 10:02:57.850124 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-hh9nc" event={"ID":"1f5735dc-67e5-423c-9a8f-d42977c892d3","Type":"ContainerStarted","Data":"95fd181978054a6ddc08d96ef68be35545d3ba0af83f3b39c04725abc9d3e709"} Nov 25 10:02:57 crc kubenswrapper[4769]: I1125 10:02:57.851704 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/70f81d0a-db58-4bd4-a0e2-ee1c03e2f923-webhook-certs\") pod \"openstack-operator-controller-manager-6c6f9bd7cc-cp48g\" (UID: \"70f81d0a-db58-4bd4-a0e2-ee1c03e2f923\") " pod="openstack-operators/openstack-operator-controller-manager-6c6f9bd7cc-cp48g" Nov 25 10:02:57 crc kubenswrapper[4769]: I1125 10:02:57.851821 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/70f81d0a-db58-4bd4-a0e2-ee1c03e2f923-metrics-certs\") pod \"openstack-operator-controller-manager-6c6f9bd7cc-cp48g\" (UID: \"70f81d0a-db58-4bd4-a0e2-ee1c03e2f923\") " pod="openstack-operators/openstack-operator-controller-manager-6c6f9bd7cc-cp48g" Nov 25 10:02:57 crc kubenswrapper[4769]: E1125 10:02:57.852030 4769 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 25 10:02:57 crc kubenswrapper[4769]: E1125 10:02:57.852173 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/70f81d0a-db58-4bd4-a0e2-ee1c03e2f923-webhook-certs podName:70f81d0a-db58-4bd4-a0e2-ee1c03e2f923 nodeName:}" failed. No retries permitted until 2025-11-25 10:02:59.852148273 +0000 UTC m=+1128.437120586 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/70f81d0a-db58-4bd4-a0e2-ee1c03e2f923-webhook-certs") pod "openstack-operator-controller-manager-6c6f9bd7cc-cp48g" (UID: "70f81d0a-db58-4bd4-a0e2-ee1c03e2f923") : secret "webhook-server-cert" not found Nov 25 10:02:57 crc kubenswrapper[4769]: E1125 10:02:57.852046 4769 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 25 10:02:57 crc kubenswrapper[4769]: E1125 10:02:57.853016 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/70f81d0a-db58-4bd4-a0e2-ee1c03e2f923-metrics-certs podName:70f81d0a-db58-4bd4-a0e2-ee1c03e2f923 nodeName:}" failed. No retries permitted until 2025-11-25 10:02:59.853004406 +0000 UTC m=+1128.437976899 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/70f81d0a-db58-4bd4-a0e2-ee1c03e2f923-metrics-certs") pod "openstack-operator-controller-manager-6c6f9bd7cc-cp48g" (UID: "70f81d0a-db58-4bd4-a0e2-ee1c03e2f923") : secret "metrics-server-cert" not found Nov 25 10:02:57 crc kubenswrapper[4769]: I1125 10:02:57.915334 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-m2qxd" event={"ID":"1da948d8-e834-488a-a3ec-a0c0229ebaf5","Type":"ContainerStarted","Data":"858fc79411f3a0e84848e3b44de921764fc2064d53a5cc19d82c21cf6a6c9564"} Nov 25 10:02:57 crc kubenswrapper[4769]: I1125 10:02:57.940417 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7xcjk" event={"ID":"0f8a8be0-04e6-4ef8-b3ce-fb6ced595cbd","Type":"ContainerStarted","Data":"3bed4a312ce17c2e60eeb2899f70d055e823217f67545eac9170cdc1798c5553"} Nov 25 10:02:57 crc kubenswrapper[4769]: I1125 10:02:57.989495 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-2hp9n" event={"ID":"aaa65e3e-75e2-4f50-b9d6-aa9710a6e394","Type":"ContainerStarted","Data":"d2d70d831223895e429f07cac75c466bbe1bf63e1335f84cff77ff092d90b68b"} Nov 25 10:02:58 crc kubenswrapper[4769]: I1125 10:02:58.001151 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-774b86978c-w8ftq" event={"ID":"67abea47-5e8a-43a2-8865-929cfdfc607c","Type":"ContainerStarted","Data":"8b4e1bd1acc186e281e8caf11c69b1c93dd59218e9ac3a1deb73558967f520ae"} Nov 25 10:02:58 crc kubenswrapper[4769]: I1125 10:02:58.027652 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-47gs9" event={"ID":"b59cac8b-fb36-4316-ab83-da7202b67af5","Type":"ContainerStarted","Data":"0989dedd7d767731775de660f3d0395d1787ad56b3bad8f5432b6bab55467f8d"} Nov 25 10:02:58 crc kubenswrapper[4769]: I1125 10:02:58.513844 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wssdn"] Nov 25 10:02:58 crc kubenswrapper[4769]: I1125 10:02:58.545077 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-rgn2z"] Nov 25 10:02:58 crc kubenswrapper[4769]: W1125 10:02:58.546729 4769 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod18e1910e_52b2_439b_a93f_4ffe63a7b992.slice/crio-52a19dff412cd9b3675faa6f949f8cf120a450378b0aa285b431fc191e0c2d2a WatchSource:0}: Error finding container 52a19dff412cd9b3675faa6f949f8cf120a450378b0aa285b431fc191e0c2d2a: Status 404 returned error can't find the container with id 52a19dff412cd9b3675faa6f949f8cf120a450378b0aa285b431fc191e0c2d2a Nov 25 10:02:58 crc kubenswrapper[4769]: I1125 10:02:58.637417 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-54cf759cb9-dcqfc"] Nov 25 10:02:58 crc kubenswrapper[4769]: I1125 10:02:58.676167 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-fd75fd47d-kbhzd"] Nov 25 10:02:58 crc kubenswrapper[4769]: I1125 10:02:58.694393 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5cb74df96-b25q9"] Nov 25 10:02:58 crc kubenswrapper[4769]: I1125 10:02:58.743546 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5db546f9d9-g8xqz"] Nov 25 10:02:58 crc kubenswrapper[4769]: I1125 10:02:58.758342 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-rcv4n"] Nov 25 10:02:58 crc kubenswrapper[4769]: I1125 10:02:58.787356 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-66cf5c67ff-68bvf"] Nov 25 10:02:58 crc kubenswrapper[4769]: I1125 10:02:58.802453 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-58bb8d67cc-qdjsd"] Nov 25 10:02:58 crc kubenswrapper[4769]: I1125 10:02:58.817655 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-864885998-rz7fl"] Nov 25 10:02:58 crc kubenswrapper[4769]: I1125 10:02:58.872049 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-d5cc86f4b-nktn6"] Nov 25 10:02:58 crc kubenswrapper[4769]: W1125 10:02:58.874744 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod136f8f90_5673_4a08_ab4b_c030c1c428a6.slice/crio-2c3c432bc06cebab1f1ed55817c7f73986a659642cd4fc22b184a1fe76addca6 WatchSource:0}: Error finding container 2c3c432bc06cebab1f1ed55817c7f73986a659642cd4fc22b184a1fe76addca6: Status 404 returned error can't find the container with id 2c3c432bc06cebab1f1ed55817c7f73986a659642cd4fc22b184a1fe76addca6 Nov 25 10:02:58 crc kubenswrapper[4769]: I1125 10:02:58.885614 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-bzrbr"] Nov 25 10:02:58 crc kubenswrapper[4769]: E1125 10:02:58.900808 4769 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2jdbr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-bzrbr_openstack-operators(a032414e-4be2-47f7-ac88-3bdec0ccb151): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 10:02:58 crc kubenswrapper[4769]: E1125 10:02:58.902228 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-bzrbr" podUID="a032414e-4be2-47f7-ac88-3bdec0ccb151" Nov 25 10:02:59 crc kubenswrapper[4769]: I1125 10:02:59.077232 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-rgn2z" event={"ID":"6d9be953-34ea-4956-96bf-84d5f8babb2d","Type":"ContainerStarted","Data":"f1696babc4961e587f83ab5a98f6b76d36904d562dd555902d719f00ca648cad"} Nov 25 10:02:59 crc kubenswrapper[4769]: I1125 10:02:59.099918 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-864885998-rz7fl" event={"ID":"8e7436d0-2ff7-4a11-9ab8-74a91e56de4a","Type":"ContainerStarted","Data":"6438bf27606bd544854d938bb07111cb38fab3d0973fbc12e30b787b0fa4f751"} Nov 25 10:02:59 crc kubenswrapper[4769]: I1125 10:02:59.107779 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rcv4n" event={"ID":"1b8cd25d-43dd-4774-b1d9-59572bb6bef7","Type":"ContainerStarted","Data":"68514354017fcaf7a9378736cd23c89c2ad3ad65b8c1afc750f648b83bad03b3"} Nov 25 10:02:59 crc kubenswrapper[4769]: I1125 10:02:59.110904 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-g8xqz" event={"ID":"1be22f03-8697-413b-922c-9344185c05c4","Type":"ContainerStarted","Data":"400db1b6e2cb4197fd5696c10b94dc829703c8d1fd940eea156afb288b1dc818"} Nov 25 10:02:59 crc kubenswrapper[4769]: I1125 10:02:59.115657 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-qdjsd" event={"ID":"63e66921-47a7-407a-b50e-06cf5cadb8be","Type":"ContainerStarted","Data":"0d11443373e4f3330757600edaa03760101a54255904dc0e55833f6b78f1b97d"} Nov 25 10:02:59 crc kubenswrapper[4769]: I1125 10:02:59.127832 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-nktn6" event={"ID":"136f8f90-5673-4a08-ab4b-c030c1c428a6","Type":"ContainerStarted","Data":"2c3c432bc06cebab1f1ed55817c7f73986a659642cd4fc22b184a1fe76addca6"} Nov 25 10:02:59 crc kubenswrapper[4769]: I1125 10:02:59.129732 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wssdn" event={"ID":"18e1910e-52b2-439b-a93f-4ffe63a7b992","Type":"ContainerStarted","Data":"52a19dff412cd9b3675faa6f949f8cf120a450378b0aa285b431fc191e0c2d2a"} Nov 25 10:02:59 crc kubenswrapper[4769]: I1125 10:02:59.148430 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-kbhzd" event={"ID":"88cb8ad7-c855-45eb-a471-aacb8c42082c","Type":"ContainerStarted","Data":"b3581410701d58faebc6a4e230f5e8dd6374eb09ae977cc17cfe6b69c32ff1b7"} Nov 25 10:02:59 crc kubenswrapper[4769]: I1125 10:02:59.164448 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-bzrbr" event={"ID":"a032414e-4be2-47f7-ac88-3bdec0ccb151","Type":"ContainerStarted","Data":"bd812d129d3ade66c59dd0c67c756aa53da2d216acb922e1dcc6f277060e1ed0"} Nov 25 10:02:59 crc kubenswrapper[4769]: E1125 10:02:59.169043 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-bzrbr" podUID="a032414e-4be2-47f7-ac88-3bdec0ccb151" Nov 25 10:02:59 crc kubenswrapper[4769]: I1125 10:02:59.171614 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cb74df96-b25q9" event={"ID":"c21f4ff5-86fa-44f0-993f-59189de57182","Type":"ContainerStarted","Data":"6b3d1759a92504b26fcdc5b7dd7c8b24d0cb8bea27840be4436968de2b68fe23"} Nov 25 10:02:59 crc kubenswrapper[4769]: I1125 10:02:59.177733 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-54cf759cb9-dcqfc" event={"ID":"c1d16c0c-cca9-4794-8a52-c8674d9a069e","Type":"ContainerStarted","Data":"d74de13feef0d14fff692ded0612bb8902da1b751d569c6f52fcea6342060656"} Nov 25 10:02:59 crc kubenswrapper[4769]: I1125 10:02:59.180390 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-68bvf" event={"ID":"a2f1ad69-27e4-4131-a742-a8d2c5df8636","Type":"ContainerStarted","Data":"4b4fd571d88d7fe38e6dfc362adf09c57325f429fffe9fd5df140f79f9ea5db6"} Nov 25 10:02:59 crc kubenswrapper[4769]: I1125 10:02:59.303882 
4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/169b77e8-e7b0-4d11-9915-2442f48d9347-cert\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-pxvh7\" (UID: \"169b77e8-e7b0-4d11-9915-2442f48d9347\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-pxvh7" Nov 25 10:02:59 crc kubenswrapper[4769]: I1125 10:02:59.334886 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/169b77e8-e7b0-4d11-9915-2442f48d9347-cert\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-pxvh7\" (UID: \"169b77e8-e7b0-4d11-9915-2442f48d9347\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-pxvh7" Nov 25 10:02:59 crc kubenswrapper[4769]: I1125 10:02:59.433870 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-pxvh7" Nov 25 10:02:59 crc kubenswrapper[4769]: I1125 10:02:59.913692 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/70f81d0a-db58-4bd4-a0e2-ee1c03e2f923-metrics-certs\") pod \"openstack-operator-controller-manager-6c6f9bd7cc-cp48g\" (UID: \"70f81d0a-db58-4bd4-a0e2-ee1c03e2f923\") " pod="openstack-operators/openstack-operator-controller-manager-6c6f9bd7cc-cp48g" Nov 25 10:02:59 crc kubenswrapper[4769]: I1125 10:02:59.915156 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/70f81d0a-db58-4bd4-a0e2-ee1c03e2f923-webhook-certs\") pod \"openstack-operator-controller-manager-6c6f9bd7cc-cp48g\" (UID: \"70f81d0a-db58-4bd4-a0e2-ee1c03e2f923\") " pod="openstack-operators/openstack-operator-controller-manager-6c6f9bd7cc-cp48g" Nov 25 10:02:59 crc kubenswrapper[4769]: I1125 10:02:59.920293 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/70f81d0a-db58-4bd4-a0e2-ee1c03e2f923-metrics-certs\") pod \"openstack-operator-controller-manager-6c6f9bd7cc-cp48g\" (UID: \"70f81d0a-db58-4bd4-a0e2-ee1c03e2f923\") " pod="openstack-operators/openstack-operator-controller-manager-6c6f9bd7cc-cp48g" Nov 25 10:02:59 crc kubenswrapper[4769]: I1125 10:02:59.962568 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/70f81d0a-db58-4bd4-a0e2-ee1c03e2f923-webhook-certs\") pod \"openstack-operator-controller-manager-6c6f9bd7cc-cp48g\" (UID: \"70f81d0a-db58-4bd4-a0e2-ee1c03e2f923\") " pod="openstack-operators/openstack-operator-controller-manager-6c6f9bd7cc-cp48g" Nov 25 10:03:00 crc kubenswrapper[4769]: I1125 10:03:00.223386 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-pxvh7"] Nov 25 10:03:00 crc kubenswrapper[4769]: I1125 10:03:00.252679 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-6c6f9bd7cc-cp48g" Nov 25 10:03:00 crc kubenswrapper[4769]: E1125 10:03:00.277472 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-bzrbr" podUID="a032414e-4be2-47f7-ac88-3bdec0ccb151" Nov 25 10:03:00 crc kubenswrapper[4769]: I1125 10:03:00.872788 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-6c6f9bd7cc-cp48g"] Nov 25 10:03:01 crc kubenswrapper[4769]: I1125 10:03:01.312178 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-pxvh7" event={"ID":"169b77e8-e7b0-4d11-9915-2442f48d9347","Type":"ContainerStarted","Data":"4783451627fbe9c03d4042fe5a9488d7aa3609b7d8812a5c5ad1943712c11e3c"} Nov 25 10:03:01 crc kubenswrapper[4769]: I1125 10:03:01.318231 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-6c6f9bd7cc-cp48g" event={"ID":"70f81d0a-db58-4bd4-a0e2-ee1c03e2f923","Type":"ContainerStarted","Data":"f2f702622075fd8897fa74752169c455d76f56eb0778b81be7a7fefc17e80c96"} Nov 25 10:03:01 crc kubenswrapper[4769]: I1125 10:03:01.319633 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-6c6f9bd7cc-cp48g" Nov 25 10:03:01 crc kubenswrapper[4769]: I1125 10:03:01.351106 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-6c6f9bd7cc-cp48g" podStartSLOduration=6.351085817 podStartE2EDuration="6.351085817s" podCreationTimestamp="2025-11-25 10:02:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:03:01.345593502 +0000 UTC m=+1129.930565815" watchObservedRunningTime="2025-11-25 10:03:01.351085817 +0000 UTC m=+1129.936058130" Nov 25 10:03:02 crc kubenswrapper[4769]: I1125 10:03:02.394594 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-6c6f9bd7cc-cp48g" event={"ID":"70f81d0a-db58-4bd4-a0e2-ee1c03e2f923","Type":"ContainerStarted","Data":"1121da9064cc527187b8a3a839889c52f4f1b9643b4d85cf4b8fd8e71179d86a"} Nov 25 10:03:10 crc kubenswrapper[4769]: I1125 10:03:10.269911 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-6c6f9bd7cc-cp48g" Nov 25 10:03:12 crc kubenswrapper[4769]: E1125 10:03:12.250023 4769 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ironic-operator@sha256:b582189b55fddc180a6d468c9dba7078009a693db37b4093d4ba0c99ec675377" Nov 25 10:03:12 crc kubenswrapper[4769]: E1125 10:03:12.262479 4769 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ironic-operator@sha256:b582189b55fddc180a6d468c9dba7078009a693db37b4093d4ba0c99ec675377,Command:[/manager],Args:[--leader-elect 
--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-vwt67,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ironic-operator-controller-manager-5bfcdc958c-47gs9_openstack-operators(b59cac8b-fb36-4316-ab83-da7202b67af5): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 10:03:12 crc kubenswrapper[4769]: E1125 10:03:12.880783 4769 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/placement-operator@sha256:4094e7fc11a33e8e2b6768a053cafaf5b122446d23f9113d43d520cb64e9776c" Nov 25 10:03:12 crc kubenswrapper[4769]: E1125 10:03:12.881075 4769 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:4094e7fc11a33e8e2b6768a053cafaf5b122446d23f9113d43d520cb64e9776c,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-llppr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-5db546f9d9-g8xqz_openstack-operators(1be22f03-8697-413b-922c-9344185c05c4): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 10:03:13 crc kubenswrapper[4769]: E1125 10:03:13.415228 4769 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/manila-operator@sha256:b749a5dd8bc718875c3f5e81b38d54d003be77ab92de4a3e9f9595566496a58a" Nov 25 10:03:13 crc kubenswrapper[4769]: E1125 10:03:13.415470 4769 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/manila-operator@sha256:b749a5dd8bc718875c3f5e81b38d54d003be77ab92de4a3e9f9595566496a58a,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5wkcf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-58bb8d67cc-qdjsd_openstack-operators(63e66921-47a7-407a-b50e-06cf5cadb8be): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 10:03:16 crc kubenswrapper[4769]: E1125 10:03:16.726617 4769 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/keystone-operator@sha256:3ef72bbd7cce89ff54d850ff44ca6d7b2360834a502da3d561aeb6fd3d9af50a" Nov 25 10:03:16 crc kubenswrapper[4769]: E1125 10:03:16.728428 4769 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:3ef72bbd7cce89ff54d850ff44ca6d7b2360834a502da3d561aeb6fd3d9af50a,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-lslmw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-748dc6576f-hh9nc_openstack-operators(1f5735dc-67e5-423c-9a8f-d42977c892d3): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 10:03:17 crc kubenswrapper[4769]: E1125 10:03:17.423910 4769 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/designate-operator@sha256:c6405d94e56b40ef669729216ab4b9c441f34bb280902efa2940038c076b560f" Nov 25 10:03:17 crc kubenswrapper[4769]: E1125 10:03:17.424184 4769 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/designate-operator@sha256:c6405d94e56b40ef669729216ab4b9c441f34bb280902efa2940038c076b560f,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-8b2sq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod designate-operator-controller-manager-7d695c9b56-7xcjk_openstack-operators(0f8a8be0-04e6-4ef8-b3ce-fb6ced595cbd): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 10:03:18 crc kubenswrapper[4769]: E1125 10:03:18.177039 4769 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/cinder-operator@sha256:553b1288b330ad05771d59c6b73c1681c95f457e8475682f9ad0d2e6b85f37e9" Nov 25 10:03:18 crc kubenswrapper[4769]: E1125 10:03:18.177742 4769 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/cinder-operator@sha256:553b1288b330ad05771d59c6b73c1681c95f457e8475682f9ad0d2e6b85f37e9,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-vdjxf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-operator-controller-manager-79856dc55c-2hp9n_openstack-operators(aaa65e3e-75e2-4f50-b9d6-aa9710a6e394): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 10:03:21 crc kubenswrapper[4769]: E1125 10:03:21.164224 4769 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/glance-operator@sha256:d38faa9070da05487afdaa9e261ad39274c2ed862daf42efa460a040431f1991" Nov 25 10:03:21 crc kubenswrapper[4769]: E1125 10:03:21.164890 4769 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/glance-operator@sha256:d38faa9070da05487afdaa9e261ad39274c2ed862daf42efa460a040431f1991,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-lv6fp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-operator-controller-manager-68b95954c9-k2htr_openstack-operators(4894deb0-65ca-4b42-b397-4092a75739c9): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 10:03:22 crc kubenswrapper[4769]: E1125 10:03:22.038890 4769 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/swift-operator@sha256:c0b5f124a37c1538042c0e63f0978429572e2a851d7f3a6eb80de09b86d755a0" Nov 25 10:03:22 crc kubenswrapper[4769]: E1125 10:03:22.039157 4769 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:c0b5f124a37c1538042c0e63f0978429572e2a851d7f3a6eb80de09b86d755a0,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-tmdxn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-6fdc4fcf86-wssdn_openstack-operators(18e1910e-52b2-439b-a93f-4ffe63a7b992): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 10:03:22 crc kubenswrapper[4769]: I1125 10:03:22.290624 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:03:22 crc kubenswrapper[4769]: I1125 10:03:22.290718 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:03:22 crc kubenswrapper[4769]: I1125 10:03:22.290791 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" Nov 25 10:03:22 crc kubenswrapper[4769]: I1125 10:03:22.291720 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"cb83aff5daeabf3c346d649af9f679bac8007d12e211a6552aed7e17452f752f"} pod="openshift-machine-config-operator/machine-config-daemon-98mzt" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 10:03:22 crc kubenswrapper[4769]: I1125 10:03:22.291808 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" containerID="cri-o://cb83aff5daeabf3c346d649af9f679bac8007d12e211a6552aed7e17452f752f" gracePeriod=600 Nov 25 10:03:22 crc kubenswrapper[4769]: E1125 10:03:22.567019 4769 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/mariadb-operator@sha256:7b90521b9e9cb4eb43c2f1c3bf85dbd068d684315f4f705b07708dd078df9d04" Nov 25 10:03:22 crc kubenswrapper[4769]: E1125 10:03:22.567241 4769 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/mariadb-operator@sha256:7b90521b9e9cb4eb43c2f1c3bf85dbd068d684315f4f705b07708dd078df9d04,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9glvj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod mariadb-operator-controller-manager-cb6c4fdb7-fknhz_openstack-operators(9d0ef7c9-7421-4fe2-b1c8-551253bea174): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 10:03:22 crc kubenswrapper[4769]: I1125 10:03:22.638259 4769 generic.go:334] "Generic (PLEG): container finished" podID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerID="cb83aff5daeabf3c346d649af9f679bac8007d12e211a6552aed7e17452f752f" exitCode=0 Nov 25 10:03:22 crc kubenswrapper[4769]: I1125 10:03:22.638482 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" event={"ID":"d58c71b5-5dc4-45c1-9b58-9740a35d2256","Type":"ContainerDied","Data":"cb83aff5daeabf3c346d649af9f679bac8007d12e211a6552aed7e17452f752f"} Nov 25 10:03:22 crc kubenswrapper[4769]: I1125 10:03:22.638800 4769 scope.go:117] "RemoveContainer" containerID="890cbbbb644d2d20db67bdcbf036dd9908902406f9cea9d984225deeb8a33fbe" Nov 25 10:03:23 crc kubenswrapper[4769]: E1125 10:03:23.152279 4769 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/neutron-operator@sha256:207578cb433471cc1a79c21a808c8a15489d1d3c9fa77e29f3f697c33917fec6" Nov 25 10:03:23 crc kubenswrapper[4769]: E1125 10:03:23.152506 4769 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:manager,Image:quay.io/openstack-k8s-operators/neutron-operator@sha256:207578cb433471cc1a79c21a808c8a15489d1d3c9fa77e29f3f697c33917fec6,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6fxg9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod neutron-operator-controller-manager-7c57c8bbc4-rgn2z_openstack-operators(6d9be953-34ea-4956-96bf-84d5f8babb2d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 10:03:23 crc kubenswrapper[4769]: E1125 10:03:23.752559 4769 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/nova-operator@sha256:c053e34316044f14929e16e4f0d97f9f1b24cb68b5e22b925ca74c66aaaed0a7" Nov 25 10:03:23 crc kubenswrapper[4769]: E1125 10:03:23.752941 4769 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:c053e34316044f14929e16e4f0d97f9f1b24cb68b5e22b925ca74c66aaaed0a7,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} 
{} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-kx4gs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-79556f57fc-rcv4n_openstack-operators(1b8cd25d-43dd-4774-b1d9-59572bb6bef7): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 10:03:25 crc kubenswrapper[4769]: E1125 10:03:25.426726 4769 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/test-operator@sha256:82207e753574d4be246f86c4b074500d66cf20214aa80f0a8525cf3287a35e6d" Nov 25 10:03:25 crc kubenswrapper[4769]: E1125 10:03:25.427655 4769 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:82207e753574d4be246f86c4b074500d66cf20214aa80f0a8525cf3287a35e6d,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-fxwd5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5cb74df96-b25q9_openstack-operators(c21f4ff5-86fa-44f0-993f-59189de57182): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 10:03:27 crc kubenswrapper[4769]: E1125 10:03:27.607655 4769 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:78852f8ba332a5756c1551c126157f735279101a0fc3277ba4aa4db3478789dd" Nov 25 10:03:27 crc kubenswrapper[4769]: E1125 10:03:27.614794 4769 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:78852f8ba332a5756c1551c126157f735279101a0fc3277ba4aa4db3478789dd,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-baremetal-operator-agent:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_ANSIBLEEE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_EVALUATOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-evaluator:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_NOTIFIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-notifier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_APACHE_IMAGE_URL_DEFAULT,Value:registry.redhat.io/ubi9/httpd-24:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_KEYSTONE_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-keystone-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_IPMI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_MYSQLD_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/mysqld-exporter:v0.15.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_NOTIFICATION_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-notification:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_SGCORE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/sg-core:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_BACKUP_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-backup:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_VOLUME_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-volume:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_API_IMAGE_URL_DEFAULT,Value:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_PROC_I
MAGE_URL_DEFAULT,Value:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-processor:current,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_BACKENDBIND9_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-backend-bind9:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_MDNS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-mdns:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_PRODUCER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-producer:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_UNBOUND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-unbound:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_FRR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-frr:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_ISCSID_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-iscsid:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_KEPLER_IMAGE_URL_DEFAULT,Value:quay.io/sustainable_computing_io/kepler:release-0.7.12,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_LOGROTATE_CROND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cron:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_MULTIPATHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-multipathd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_DHCP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-dhcp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_METADATA_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_OVN_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-ovn-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_SRIOV_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-sriov-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NODE_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/node-exporter:v1.5.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_OVN_BGP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-bgp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_PODMAN_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/navidys/prometheus-podman-exporter:v1.10.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_GLANCE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_CFNAPI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api-cfn:current-podified,ValueFrom:nil,},EnvVar{Name:RELAT
ED_IMAGE_HEAT_ENGINE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HORIZON_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_MEMCACHED_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-memcached:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_REDIS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-redis:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_INSPECTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-inspector:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_NEUTRON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-neutron-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PXE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-pxe:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PYTHON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/ironic-python-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KEYSTONE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-keystone:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KSM_IMAGE_URL_DEFAULT,Value:registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SHARE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-share:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MARIADB_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NET_UTILS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-netutils:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NEUTRON_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_NOVNC_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-novncproxy:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-an
telope-centos9/openstack-octavia-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HEALTHMANAGER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-health-manager:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HOUSEKEEPING_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-housekeeping:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_RSYSLOG_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rsyslog:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_CLIENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_MUST_GATHER_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-must-gather:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_NETWORK_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OS_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/edpm-hardened-uefi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_OVS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NORTHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-northd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_SB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-sb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_PLACEMENT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_RABBITMQ_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_ACCOUNT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-account:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-container:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_OBJECT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-object:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_PROXY_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-proxy-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_TEST_TEMPEST_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_APPLIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-applier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_DECISION_ENGINE_IMA
GE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-decision-engine:current-podified,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-md4pg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-baremetal-operator-controller-manager-544b9bb9-pxvh7_openstack-operators(169b77e8-e7b0-4d11-9915-2442f48d9347): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 10:03:27 crc kubenswrapper[4769]: E1125 10:03:27.757323 4769 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.75:5001/openstack-k8s-operators/telemetry-operator:28a8c1cf37b45ade24203f1ec8f593431858d288" Nov 25 10:03:27 crc kubenswrapper[4769]: E1125 10:03:27.757406 4769 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.75:5001/openstack-k8s-operators/telemetry-operator:28a8c1cf37b45ade24203f1ec8f593431858d288" Nov 25 10:03:27 crc kubenswrapper[4769]: E1125 10:03:27.757609 4769 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:38.102.83.75:5001/openstack-k8s-operators/telemetry-operator:28a8c1cf37b45ade24203f1ec8f593431858d288,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jx6ql,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-54cf759cb9-dcqfc_openstack-operators(c1d16c0c-cca9-4794-8a52-c8674d9a069e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 10:03:30 crc kubenswrapper[4769]: E1125 10:03:30.041617 4769 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f" Nov 25 10:03:30 crc kubenswrapper[4769]: E1125 10:03:30.042260 4769 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6zb9p,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-864885998-rz7fl_openstack-operators(8e7436d0-2ff7-4a11-9ab8-74a91e56de4a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 10:03:32 crc kubenswrapper[4769]: E1125 10:03:32.742180 4769 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2" Nov 25 10:03:32 crc kubenswrapper[4769]: E1125 10:03:32.742890 4769 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2jdbr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-bzrbr_openstack-operators(a032414e-4be2-47f7-ac88-3bdec0ccb151): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 10:03:32 crc kubenswrapper[4769]: E1125 10:03:32.745265 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-bzrbr" podUID="a032414e-4be2-47f7-ac88-3bdec0ccb151" Nov 25 10:03:33 crc kubenswrapper[4769]: I1125 10:03:33.841994 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-zv5xr" event={"ID":"b90cc789-8211-48bc-85cc-1a31ad1af486","Type":"ContainerStarted","Data":"ac289fa8350a70a6ad5e0d6a6e2d17b36cce60a1cca3f86da81a58a9be17bc34"} Nov 25 10:03:33 crc kubenswrapper[4769]: I1125 10:03:33.847001 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" event={"ID":"d58c71b5-5dc4-45c1-9b58-9740a35d2256","Type":"ContainerStarted","Data":"94adcb75d0d9250faebaf531d11103a1b9f1da8a7a156454e1964c577d865b3a"} Nov 25 10:03:34 crc kubenswrapper[4769]: I1125 10:03:34.856057 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-kbhzd" event={"ID":"88cb8ad7-c855-45eb-a471-aacb8c42082c","Type":"ContainerStarted","Data":"396c9684073cbf9cfbac8196a83452dc17e63a68e71e196a5bd3cfd3d00a3ff8"} Nov 25 10:03:34 crc kubenswrapper[4769]: I1125 10:03:34.858574 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-m2qxd" event={"ID":"1da948d8-e834-488a-a3ec-a0c0229ebaf5","Type":"ContainerStarted","Data":"946ca39c4af07e3b66da42eb0fe6d94359578b66083e6fe55433cc33fabbf671"} Nov 25 10:03:34 crc kubenswrapper[4769]: I1125 10:03:34.861939 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-68bvf" event={"ID":"a2f1ad69-27e4-4131-a742-a8d2c5df8636","Type":"ContainerStarted","Data":"00477b785738787aa94d9a777223971820e93dafb0bf8688e595b5cf3205784e"} Nov 25 10:03:34 crc kubenswrapper[4769]: I1125 10:03:34.865199 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-774b86978c-w8ftq" 
event={"ID":"67abea47-5e8a-43a2-8865-929cfdfc607c","Type":"ContainerStarted","Data":"b91c9f589a2efc6abf9221b2ac5a1520cdfe6e110dd005fda6c245342058b4e0"} Nov 25 10:03:35 crc kubenswrapper[4769]: I1125 10:03:35.879862 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-nktn6" event={"ID":"136f8f90-5673-4a08-ab4b-c030c1c428a6","Type":"ContainerStarted","Data":"3a1d89eab4ebc4eb141327b74163cc1f4e865b034ffb2645df93f4aef500b974"} Nov 25 10:03:36 crc kubenswrapper[4769]: E1125 10:03:36.825992 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-rgn2z" podUID="6d9be953-34ea-4956-96bf-84d5f8babb2d" Nov 25 10:03:36 crc kubenswrapper[4769]: E1125 10:03:36.834560 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-qdjsd" podUID="63e66921-47a7-407a-b50e-06cf5cadb8be" Nov 25 10:03:36 crc kubenswrapper[4769]: I1125 10:03:36.901488 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-qdjsd" event={"ID":"63e66921-47a7-407a-b50e-06cf5cadb8be","Type":"ContainerStarted","Data":"ae44e444cb3158f307abd6f37d2bac6354b69c26a160f6d478c7e4bfcb1cf381"} Nov 25 10:03:36 crc kubenswrapper[4769]: I1125 10:03:36.912275 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-rgn2z" event={"ID":"6d9be953-34ea-4956-96bf-84d5f8babb2d","Type":"ContainerStarted","Data":"5efcb5a3dccd5cfb582024d0c5a2d98f2f21438c7309e9c74f062b88f28115e9"} Nov 25 10:03:36 crc kubenswrapper[4769]: E1125 10:03:36.951319 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/test-operator-controller-manager-5cb74df96-b25q9" podUID="c21f4ff5-86fa-44f0-993f-59189de57182" Nov 25 10:03:36 crc kubenswrapper[4769]: E1125 10:03:36.974468 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rcv4n" podUID="1b8cd25d-43dd-4774-b1d9-59572bb6bef7" Nov 25 10:03:36 crc kubenswrapper[4769]: E1125 10:03:36.983538 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7xcjk" podUID="0f8a8be0-04e6-4ef8-b3ce-fb6ced595cbd" Nov 25 10:03:37 crc kubenswrapper[4769]: E1125 10:03:37.170077 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/watcher-operator-controller-manager-864885998-rz7fl" podUID="8e7436d0-2ff7-4a11-9ab8-74a91e56de4a" Nov 25 10:03:37 crc kubenswrapper[4769]: E1125 
10:03:37.185849 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wssdn" podUID="18e1910e-52b2-439b-a93f-4ffe63a7b992" Nov 25 10:03:37 crc kubenswrapper[4769]: E1125 10:03:37.331267 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-fknhz" podUID="9d0ef7c9-7421-4fe2-b1c8-551253bea174" Nov 25 10:03:37 crc kubenswrapper[4769]: E1125 10:03:37.398930 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-g8xqz" podUID="1be22f03-8697-413b-922c-9344185c05c4" Nov 25 10:03:37 crc kubenswrapper[4769]: E1125 10:03:37.463834 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-pxvh7" podUID="169b77e8-e7b0-4d11-9915-2442f48d9347" Nov 25 10:03:37 crc kubenswrapper[4769]: E1125 10:03:37.522645 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-2hp9n" podUID="aaa65e3e-75e2-4f50-b9d6-aa9710a6e394" Nov 25 10:03:37 crc kubenswrapper[4769]: E1125 10:03:37.712430 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/telemetry-operator-controller-manager-54cf759cb9-dcqfc" podUID="c1d16c0c-cca9-4794-8a52-c8674d9a069e" Nov 25 10:03:37 crc kubenswrapper[4769]: E1125 10:03:37.735291 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-47gs9" podUID="b59cac8b-fb36-4316-ab83-da7202b67af5" Nov 25 10:03:37 crc kubenswrapper[4769]: E1125 10:03:37.882836 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-hh9nc" podUID="1f5735dc-67e5-423c-9a8f-d42977c892d3" Nov 25 10:03:37 crc kubenswrapper[4769]: I1125 10:03:37.946200 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-zv5xr" event={"ID":"b90cc789-8211-48bc-85cc-1a31ad1af486","Type":"ContainerStarted","Data":"a4f5dd7ac6b84af8b5918efb7f3bbea50a578145602d5ad09f226f3295ad59a0"} Nov 25 10:03:37 crc kubenswrapper[4769]: I1125 10:03:37.948453 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-zv5xr" Nov 25 10:03:37 crc kubenswrapper[4769]: I1125 10:03:37.961038 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-2hp9n" event={"ID":"aaa65e3e-75e2-4f50-b9d6-aa9710a6e394","Type":"ContainerStarted","Data":"7eab0c00b3de083d74a348d8466b1b0ec69d253e62c5fdc29e58ba33547534cf"} Nov 25 10:03:37 crc kubenswrapper[4769]: I1125 10:03:37.973782 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-774b86978c-w8ftq" event={"ID":"67abea47-5e8a-43a2-8865-929cfdfc607c","Type":"ContainerStarted","Data":"dd9c511268bd8a464f00bd49b18e2f541a315215e7ed6b5701abfd14bdbaa4dd"} Nov 25 10:03:37 crc kubenswrapper[4769]: I1125 10:03:37.974409 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-774b86978c-w8ftq" Nov 25 10:03:37 crc kubenswrapper[4769]: I1125 10:03:37.975601 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-zv5xr" podStartSLOduration=3.360041663 podStartE2EDuration="43.975586075s" podCreationTimestamp="2025-11-25 10:02:54 +0000 UTC" firstStartedPulling="2025-11-25 10:02:56.190942427 +0000 UTC m=+1124.775914740" lastFinishedPulling="2025-11-25 10:03:36.806486829 +0000 UTC m=+1165.391459152" observedRunningTime="2025-11-25 10:03:37.971414887 +0000 UTC m=+1166.556387200" watchObservedRunningTime="2025-11-25 10:03:37.975586075 +0000 UTC m=+1166.560558388" Nov 25 10:03:38 crc kubenswrapper[4769]: I1125 10:03:38.000464 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-47gs9" event={"ID":"b59cac8b-fb36-4316-ab83-da7202b67af5","Type":"ContainerStarted","Data":"371aa9463273a465ceb24f33301a6c0426d67583d0d07d0027a0b404f0d1440c"} Nov 25 10:03:38 crc kubenswrapper[4769]: I1125 10:03:38.025159 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-hh9nc" event={"ID":"1f5735dc-67e5-423c-9a8f-d42977c892d3","Type":"ContainerStarted","Data":"bc86c3f1c8c697f55cd70bb1d4dbdc09a7b83226ed26b5d6e74b13e6cfc7421d"} Nov 25 10:03:38 crc kubenswrapper[4769]: I1125 10:03:38.028136 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-774b86978c-w8ftq" podStartSLOduration=4.444780579 podStartE2EDuration="44.028122972s" podCreationTimestamp="2025-11-25 10:02:54 +0000 UTC" firstStartedPulling="2025-11-25 10:02:57.147737354 +0000 UTC m=+1125.732709667" lastFinishedPulling="2025-11-25 10:03:36.731079747 +0000 UTC m=+1165.316052060" observedRunningTime="2025-11-25 10:03:38.026920061 +0000 UTC m=+1166.611892374" watchObservedRunningTime="2025-11-25 10:03:38.028122972 +0000 UTC m=+1166.613095285" Nov 25 10:03:38 crc kubenswrapper[4769]: I1125 10:03:38.047406 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wssdn" event={"ID":"18e1910e-52b2-439b-a93f-4ffe63a7b992","Type":"ContainerStarted","Data":"e51d6bddcc2b6822ebc3f63854a26d32d9ee1b58381459f598f068d43fe8cef4"} Nov 25 10:03:38 crc kubenswrapper[4769]: I1125 10:03:38.066783 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cb74df96-b25q9" 
event={"ID":"c21f4ff5-86fa-44f0-993f-59189de57182","Type":"ContainerStarted","Data":"f14e68fda1a997b38599d41374400728fb9d497d8d93908dca9ab4c5b1949171"} Nov 25 10:03:38 crc kubenswrapper[4769]: I1125 10:03:38.075771 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-54cf759cb9-dcqfc" event={"ID":"c1d16c0c-cca9-4794-8a52-c8674d9a069e","Type":"ContainerStarted","Data":"7c19c8267b0dd24ccc7770e5b109c55588ae03ff76d8593981f18ebe8c8730b3"} Nov 25 10:03:38 crc kubenswrapper[4769]: I1125 10:03:38.081895 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-864885998-rz7fl" event={"ID":"8e7436d0-2ff7-4a11-9ab8-74a91e56de4a","Type":"ContainerStarted","Data":"a462011427a3840a44e8f32df4a718ae0694782f52f4d0aa6277a5d05153a077"} Nov 25 10:03:38 crc kubenswrapper[4769]: E1125 10:03:38.102346 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-864885998-rz7fl" podUID="8e7436d0-2ff7-4a11-9ab8-74a91e56de4a" Nov 25 10:03:38 crc kubenswrapper[4769]: I1125 10:03:38.104491 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-fknhz" event={"ID":"9d0ef7c9-7421-4fe2-b1c8-551253bea174","Type":"ContainerStarted","Data":"45279dd12bcd1cff11c7c0c17e55876f1382777d3e466676158df5a5d8d5b2bb"} Nov 25 10:03:38 crc kubenswrapper[4769]: I1125 10:03:38.118731 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-kbhzd" event={"ID":"88cb8ad7-c855-45eb-a471-aacb8c42082c","Type":"ContainerStarted","Data":"4f3aa52133f3331cf8c6e0dce55ef71195ad60f46262232ce78210a5f76f82c2"} Nov 25 10:03:38 crc kubenswrapper[4769]: I1125 10:03:38.119217 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-kbhzd" Nov 25 10:03:38 crc kubenswrapper[4769]: I1125 10:03:38.128795 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-m2qxd" event={"ID":"1da948d8-e834-488a-a3ec-a0c0229ebaf5","Type":"ContainerStarted","Data":"7448a4db21d869931c4269fee55dd2c202864ce8d9ebe7b7c504b7c50f0deefa"} Nov 25 10:03:38 crc kubenswrapper[4769]: I1125 10:03:38.130918 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-m2qxd" Nov 25 10:03:38 crc kubenswrapper[4769]: I1125 10:03:38.166426 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7xcjk" event={"ID":"0f8a8be0-04e6-4ef8-b3ce-fb6ced595cbd","Type":"ContainerStarted","Data":"6144bebf77985cd204dbb08227a814d8238713b7b5dc1c337e5d20a8be05eb08"} Nov 25 10:03:38 crc kubenswrapper[4769]: I1125 10:03:38.171609 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-kbhzd" podStartSLOduration=6.14548089 podStartE2EDuration="44.171579384s" podCreationTimestamp="2025-11-25 10:02:54 +0000 UTC" firstStartedPulling="2025-11-25 10:02:58.722329451 +0000 UTC m=+1127.307301764" 
lastFinishedPulling="2025-11-25 10:03:36.748427955 +0000 UTC m=+1165.333400258" observedRunningTime="2025-11-25 10:03:38.165723912 +0000 UTC m=+1166.750696225" watchObservedRunningTime="2025-11-25 10:03:38.171579384 +0000 UTC m=+1166.756551697" Nov 25 10:03:38 crc kubenswrapper[4769]: I1125 10:03:38.181001 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-pxvh7" event={"ID":"169b77e8-e7b0-4d11-9915-2442f48d9347","Type":"ContainerStarted","Data":"d8d54b01d665faccbe1253ec9a1e6328b84dade14c35753f12f4a8f813c22db0"} Nov 25 10:03:38 crc kubenswrapper[4769]: I1125 10:03:38.187286 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rcv4n" event={"ID":"1b8cd25d-43dd-4774-b1d9-59572bb6bef7","Type":"ContainerStarted","Data":"b4c270ad5939674f6cdcc4e7433f1de6df76894e53a1b5d54e95e4b86e853a25"} Nov 25 10:03:38 crc kubenswrapper[4769]: E1125 10:03:38.241647 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-k2htr" podUID="4894deb0-65ca-4b42-b397-4092a75739c9" Nov 25 10:03:38 crc kubenswrapper[4769]: I1125 10:03:38.242488 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-m2qxd" podStartSLOduration=4.854264121 podStartE2EDuration="44.242467699s" podCreationTimestamp="2025-11-25 10:02:54 +0000 UTC" firstStartedPulling="2025-11-25 10:02:57.162818332 +0000 UTC m=+1125.747790645" lastFinishedPulling="2025-11-25 10:03:36.55102191 +0000 UTC m=+1165.135994223" observedRunningTime="2025-11-25 10:03:38.198847954 +0000 UTC m=+1166.783820267" watchObservedRunningTime="2025-11-25 10:03:38.242467699 +0000 UTC m=+1166.827440012" Nov 25 10:03:38 crc kubenswrapper[4769]: I1125 10:03:38.299523 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-g8xqz" event={"ID":"1be22f03-8697-413b-922c-9344185c05c4","Type":"ContainerStarted","Data":"e2f3021e42e072a029d56a8d3b76e9462c0380f661b493e41d4978ca8711e644"} Nov 25 10:03:39 crc kubenswrapper[4769]: I1125 10:03:39.263513 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-47gs9" event={"ID":"b59cac8b-fb36-4316-ab83-da7202b67af5","Type":"ContainerStarted","Data":"1a9b8362a3d604343b360e638d70bb8430a8d47fbf9df6e616d5d01ebe8d8afd"} Nov 25 10:03:39 crc kubenswrapper[4769]: I1125 10:03:39.264128 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-47gs9" Nov 25 10:03:39 crc kubenswrapper[4769]: I1125 10:03:39.267428 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-qdjsd" event={"ID":"63e66921-47a7-407a-b50e-06cf5cadb8be","Type":"ContainerStarted","Data":"e2bc390b476e8b5842c57ead30722b9aea8807b2d2505542b913b2fb7ae56e6a"} Nov 25 10:03:39 crc kubenswrapper[4769]: I1125 10:03:39.268223 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-qdjsd" Nov 25 10:03:39 crc kubenswrapper[4769]: I1125 10:03:39.284447 4769 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cb74df96-b25q9" event={"ID":"c21f4ff5-86fa-44f0-993f-59189de57182","Type":"ContainerStarted","Data":"9682b8b8fcb342c38db68ed7d793776921b15502a14e641349498956962b5ce4"} Nov 25 10:03:39 crc kubenswrapper[4769]: I1125 10:03:39.285003 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5cb74df96-b25q9" Nov 25 10:03:39 crc kubenswrapper[4769]: I1125 10:03:39.295331 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-54cf759cb9-dcqfc" event={"ID":"c1d16c0c-cca9-4794-8a52-c8674d9a069e","Type":"ContainerStarted","Data":"9debed69a3ef6fa17467a54b2c2f95fefc85a71ddae07c61e8776733e2c8d858"} Nov 25 10:03:39 crc kubenswrapper[4769]: I1125 10:03:39.314421 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-2hp9n" event={"ID":"aaa65e3e-75e2-4f50-b9d6-aa9710a6e394","Type":"ContainerStarted","Data":"7c3b83fb330e4e91f8d36af736ddc4b23b1e75845f4ea765e11ec77d28f9619c"} Nov 25 10:03:39 crc kubenswrapper[4769]: I1125 10:03:39.315605 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-2hp9n" Nov 25 10:03:39 crc kubenswrapper[4769]: I1125 10:03:39.326231 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-k2htr" event={"ID":"4894deb0-65ca-4b42-b397-4092a75739c9","Type":"ContainerStarted","Data":"a2f15a7295320587f73d28fc0ec2d8fffe92a87dd433f9a1efe645ccd6b30a19"} Nov 25 10:03:39 crc kubenswrapper[4769]: I1125 10:03:39.336894 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-68bvf" event={"ID":"a2f1ad69-27e4-4131-a742-a8d2c5df8636","Type":"ContainerStarted","Data":"4f1f9e98a1bceb6e39dac7d8629ccd3af58e06d250d48b2006d7b7f0a78e17e5"} Nov 25 10:03:39 crc kubenswrapper[4769]: I1125 10:03:39.338260 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-68bvf" Nov 25 10:03:39 crc kubenswrapper[4769]: I1125 10:03:39.342998 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-68bvf" Nov 25 10:03:39 crc kubenswrapper[4769]: I1125 10:03:39.348866 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wssdn" event={"ID":"18e1910e-52b2-439b-a93f-4ffe63a7b992","Type":"ContainerStarted","Data":"d8cd54e9de5c533f7a6eaf9a573521727cbf6c52978bbd41e5fe7af0bf47f07c"} Nov 25 10:03:39 crc kubenswrapper[4769]: I1125 10:03:39.349820 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wssdn" Nov 25 10:03:39 crc kubenswrapper[4769]: I1125 10:03:39.354575 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-hh9nc" event={"ID":"1f5735dc-67e5-423c-9a8f-d42977c892d3","Type":"ContainerStarted","Data":"e17e389fbb7c359261990f8efde39476099fe9c541d058445d5c07710d2cdbad"} Nov 25 10:03:39 crc kubenswrapper[4769]: I1125 10:03:39.355198 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-hh9nc" Nov 25 10:03:39 crc kubenswrapper[4769]: I1125 10:03:39.358827 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-rgn2z" event={"ID":"6d9be953-34ea-4956-96bf-84d5f8babb2d","Type":"ContainerStarted","Data":"0ae0674b08faa57f993ad26e7715a336d1995e72605f6c44f88026dd6dbc0a02"} Nov 25 10:03:39 crc kubenswrapper[4769]: I1125 10:03:39.359421 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-rgn2z" Nov 25 10:03:39 crc kubenswrapper[4769]: I1125 10:03:39.368775 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7xcjk" Nov 25 10:03:39 crc kubenswrapper[4769]: I1125 10:03:39.373854 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-nktn6" event={"ID":"136f8f90-5673-4a08-ab4b-c030c1c428a6","Type":"ContainerStarted","Data":"6dbba56556ffe7f4ed2f515c3f910a4cc9f15dfcea97da36268652db34cacc7a"} Nov 25 10:03:39 crc kubenswrapper[4769]: I1125 10:03:39.374900 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-nktn6" Nov 25 10:03:39 crc kubenswrapper[4769]: I1125 10:03:39.381584 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-g8xqz" Nov 25 10:03:39 crc kubenswrapper[4769]: I1125 10:03:39.391513 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-m2qxd" Nov 25 10:03:39 crc kubenswrapper[4769]: I1125 10:03:39.391574 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-zv5xr" Nov 25 10:03:39 crc kubenswrapper[4769]: E1125 10:03:39.391746 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-864885998-rz7fl" podUID="8e7436d0-2ff7-4a11-9ab8-74a91e56de4a" Nov 25 10:03:39 crc kubenswrapper[4769]: I1125 10:03:39.393823 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-774b86978c-w8ftq" Nov 25 10:03:39 crc kubenswrapper[4769]: I1125 10:03:39.396452 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-kbhzd" Nov 25 10:03:39 crc kubenswrapper[4769]: I1125 10:03:39.403795 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-47gs9" podStartSLOduration=3.844702196 podStartE2EDuration="45.403779573s" podCreationTimestamp="2025-11-25 10:02:54 +0000 UTC" firstStartedPulling="2025-11-25 10:02:57.118520922 +0000 UTC m=+1125.703493235" lastFinishedPulling="2025-11-25 10:03:38.677598299 +0000 UTC m=+1167.262570612" observedRunningTime="2025-11-25 10:03:39.402350406 +0000 UTC m=+1167.987322719" 
watchObservedRunningTime="2025-11-25 10:03:39.403779573 +0000 UTC m=+1167.988751886" Nov 25 10:03:40 crc kubenswrapper[4769]: I1125 10:03:40.072864 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7xcjk" podStartSLOduration=4.407832906 podStartE2EDuration="46.072835421s" podCreationTimestamp="2025-11-25 10:02:54 +0000 UTC" firstStartedPulling="2025-11-25 10:02:57.188927662 +0000 UTC m=+1125.773899975" lastFinishedPulling="2025-11-25 10:03:38.853930167 +0000 UTC m=+1167.438902490" observedRunningTime="2025-11-25 10:03:39.955341034 +0000 UTC m=+1168.540313347" watchObservedRunningTime="2025-11-25 10:03:40.072835421 +0000 UTC m=+1168.657807734" Nov 25 10:03:40 crc kubenswrapper[4769]: I1125 10:03:40.077428 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-qdjsd" podStartSLOduration=7.199958555 podStartE2EDuration="46.077401199s" podCreationTimestamp="2025-11-25 10:02:54 +0000 UTC" firstStartedPulling="2025-11-25 10:02:58.751158063 +0000 UTC m=+1127.336130376" lastFinishedPulling="2025-11-25 10:03:37.628600707 +0000 UTC m=+1166.213573020" observedRunningTime="2025-11-25 10:03:40.075210802 +0000 UTC m=+1168.660183115" watchObservedRunningTime="2025-11-25 10:03:40.077401199 +0000 UTC m=+1168.662373512" Nov 25 10:03:40 crc kubenswrapper[4769]: I1125 10:03:40.158831 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-nktn6" podStartSLOduration=7.394698608 podStartE2EDuration="46.158807408s" podCreationTimestamp="2025-11-25 10:02:54 +0000 UTC" firstStartedPulling="2025-11-25 10:02:58.881109576 +0000 UTC m=+1127.466081889" lastFinishedPulling="2025-11-25 10:03:37.645218376 +0000 UTC m=+1166.230190689" observedRunningTime="2025-11-25 10:03:40.130575533 +0000 UTC m=+1168.715547846" watchObservedRunningTime="2025-11-25 10:03:40.158807408 +0000 UTC m=+1168.743779721" Nov 25 10:03:40 crc kubenswrapper[4769]: I1125 10:03:40.161119 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-5cb74df96-b25q9" podStartSLOduration=5.15559407 podStartE2EDuration="45.161112228s" podCreationTimestamp="2025-11-25 10:02:55 +0000 UTC" firstStartedPulling="2025-11-25 10:02:58.665971732 +0000 UTC m=+1127.250944045" lastFinishedPulling="2025-11-25 10:03:38.67148989 +0000 UTC m=+1167.256462203" observedRunningTime="2025-11-25 10:03:40.158378266 +0000 UTC m=+1168.743350569" watchObservedRunningTime="2025-11-25 10:03:40.161112228 +0000 UTC m=+1168.746084541" Nov 25 10:03:40 crc kubenswrapper[4769]: I1125 10:03:40.205296 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-rgn2z" podStartSLOduration=7.143039101 podStartE2EDuration="46.205275147s" podCreationTimestamp="2025-11-25 10:02:54 +0000 UTC" firstStartedPulling="2025-11-25 10:02:58.567020958 +0000 UTC m=+1127.151993271" lastFinishedPulling="2025-11-25 10:03:37.629257004 +0000 UTC m=+1166.214229317" observedRunningTime="2025-11-25 10:03:40.204453685 +0000 UTC m=+1168.789425998" watchObservedRunningTime="2025-11-25 10:03:40.205275147 +0000 UTC m=+1168.790247460" Nov 25 10:03:40 crc kubenswrapper[4769]: I1125 10:03:40.293982 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-hh9nc" podStartSLOduration=5.179690566 podStartE2EDuration="46.293945974s" podCreationTimestamp="2025-11-25 10:02:54 +0000 UTC" firstStartedPulling="2025-11-25 10:02:57.557673514 +0000 UTC m=+1126.142645817" lastFinishedPulling="2025-11-25 10:03:38.671928912 +0000 UTC m=+1167.256901225" observedRunningTime="2025-11-25 10:03:40.279360114 +0000 UTC m=+1168.864332427" watchObservedRunningTime="2025-11-25 10:03:40.293945974 +0000 UTC m=+1168.878918287" Nov 25 10:03:40 crc kubenswrapper[4769]: I1125 10:03:40.346422 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-68bvf" podStartSLOduration=6.508796547 podStartE2EDuration="45.346400148s" podCreationTimestamp="2025-11-25 10:02:55 +0000 UTC" firstStartedPulling="2025-11-25 10:02:58.75368411 +0000 UTC m=+1127.338656423" lastFinishedPulling="2025-11-25 10:03:37.591287711 +0000 UTC m=+1166.176260024" observedRunningTime="2025-11-25 10:03:40.33764221 +0000 UTC m=+1168.922614523" watchObservedRunningTime="2025-11-25 10:03:40.346400148 +0000 UTC m=+1168.931372461" Nov 25 10:03:40 crc kubenswrapper[4769]: I1125 10:03:40.399405 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-pxvh7" event={"ID":"169b77e8-e7b0-4d11-9915-2442f48d9347","Type":"ContainerStarted","Data":"c819ffdb79867617900c0ddd11447953f5677fd1b9d122c17ebdeb83cbd8965a"} Nov 25 10:03:40 crc kubenswrapper[4769]: I1125 10:03:40.401460 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-pxvh7" Nov 25 10:03:40 crc kubenswrapper[4769]: I1125 10:03:40.404395 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wssdn" podStartSLOduration=5.334407217 podStartE2EDuration="45.404368987s" podCreationTimestamp="2025-11-25 10:02:55 +0000 UTC" firstStartedPulling="2025-11-25 10:02:58.602707621 +0000 UTC m=+1127.187679934" lastFinishedPulling="2025-11-25 10:03:38.672669391 +0000 UTC m=+1167.257641704" observedRunningTime="2025-11-25 10:03:40.377783725 +0000 UTC m=+1168.962756038" watchObservedRunningTime="2025-11-25 10:03:40.404368987 +0000 UTC m=+1168.989341300" Nov 25 10:03:40 crc kubenswrapper[4769]: I1125 10:03:40.405215 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-k2htr" event={"ID":"4894deb0-65ca-4b42-b397-4092a75739c9","Type":"ContainerStarted","Data":"2a082f7c1ba5b377707c21505ffb93a704670d0c971db75262950820a1188ed5"} Nov 25 10:03:40 crc kubenswrapper[4769]: I1125 10:03:40.406041 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-k2htr" Nov 25 10:03:40 crc kubenswrapper[4769]: I1125 10:03:40.408799 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rcv4n" event={"ID":"1b8cd25d-43dd-4774-b1d9-59572bb6bef7","Type":"ContainerStarted","Data":"ed06562448baa51835a8cf1f3e807c9075497978a76b10bd16176ef8d2f1f3b4"} Nov 25 10:03:40 crc kubenswrapper[4769]: I1125 10:03:40.409513 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rcv4n" Nov 25 10:03:40 crc 
kubenswrapper[4769]: I1125 10:03:40.415584 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-g8xqz" event={"ID":"1be22f03-8697-413b-922c-9344185c05c4","Type":"ContainerStarted","Data":"7517f359f23085b907717c524af1b29960d56f40646f99c57e3a8a7751cfdba4"} Nov 25 10:03:40 crc kubenswrapper[4769]: I1125 10:03:40.415669 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-2hp9n" podStartSLOduration=5.127130847 podStartE2EDuration="46.41564558s" podCreationTimestamp="2025-11-25 10:02:54 +0000 UTC" firstStartedPulling="2025-11-25 10:02:57.157760769 +0000 UTC m=+1125.742733072" lastFinishedPulling="2025-11-25 10:03:38.446275492 +0000 UTC m=+1167.031247805" observedRunningTime="2025-11-25 10:03:40.400308271 +0000 UTC m=+1168.985280584" watchObservedRunningTime="2025-11-25 10:03:40.41564558 +0000 UTC m=+1169.000617893" Nov 25 10:03:40 crc kubenswrapper[4769]: I1125 10:03:40.421551 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-fknhz" event={"ID":"9d0ef7c9-7421-4fe2-b1c8-551253bea174","Type":"ContainerStarted","Data":"6c323d71f968e46de9e8d909661fa4e76f6ea4840e9889b4fa51e11bf032dd62"} Nov 25 10:03:40 crc kubenswrapper[4769]: I1125 10:03:40.425286 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-fknhz" Nov 25 10:03:40 crc kubenswrapper[4769]: I1125 10:03:40.436674 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7xcjk" event={"ID":"0f8a8be0-04e6-4ef8-b3ce-fb6ced595cbd","Type":"ContainerStarted","Data":"6c148f7c3df14f45a0d8f4337fe0cdd97c4a74585964db421c37af0e242000af"} Nov 25 10:03:40 crc kubenswrapper[4769]: I1125 10:03:40.439652 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-54cf759cb9-dcqfc" Nov 25 10:03:40 crc kubenswrapper[4769]: I1125 10:03:40.455216 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-nktn6" Nov 25 10:03:40 crc kubenswrapper[4769]: I1125 10:03:40.479117 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-g8xqz" podStartSLOduration=5.378812372 podStartE2EDuration="45.479092511s" podCreationTimestamp="2025-11-25 10:02:55 +0000 UTC" firstStartedPulling="2025-11-25 10:02:58.753636068 +0000 UTC m=+1127.338608381" lastFinishedPulling="2025-11-25 10:03:38.853916207 +0000 UTC m=+1167.438888520" observedRunningTime="2025-11-25 10:03:40.464002338 +0000 UTC m=+1169.048974651" watchObservedRunningTime="2025-11-25 10:03:40.479092511 +0000 UTC m=+1169.064064824" Nov 25 10:03:40 crc kubenswrapper[4769]: I1125 10:03:40.548832 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-54cf759cb9-dcqfc" podStartSLOduration=5.952176019 podStartE2EDuration="45.548810965s" podCreationTimestamp="2025-11-25 10:02:55 +0000 UTC" firstStartedPulling="2025-11-25 10:02:58.626024067 +0000 UTC m=+1127.210996390" lastFinishedPulling="2025-11-25 10:03:38.222659013 +0000 UTC m=+1166.807631336" observedRunningTime="2025-11-25 10:03:40.548744873 +0000 UTC 
m=+1169.133717186" watchObservedRunningTime="2025-11-25 10:03:40.548810965 +0000 UTC m=+1169.133783278" Nov 25 10:03:40 crc kubenswrapper[4769]: I1125 10:03:40.590266 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rcv4n" podStartSLOduration=6.313177154 podStartE2EDuration="46.590223592s" podCreationTimestamp="2025-11-25 10:02:54 +0000 UTC" firstStartedPulling="2025-11-25 10:02:58.751602495 +0000 UTC m=+1127.336574808" lastFinishedPulling="2025-11-25 10:03:39.028648933 +0000 UTC m=+1167.613621246" observedRunningTime="2025-11-25 10:03:40.574466062 +0000 UTC m=+1169.159438375" watchObservedRunningTime="2025-11-25 10:03:40.590223592 +0000 UTC m=+1169.175196265" Nov 25 10:03:40 crc kubenswrapper[4769]: I1125 10:03:40.636471 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-fknhz" podStartSLOduration=5.1398977949999995 podStartE2EDuration="46.636443475s" podCreationTimestamp="2025-11-25 10:02:54 +0000 UTC" firstStartedPulling="2025-11-25 10:02:57.516712162 +0000 UTC m=+1126.101684475" lastFinishedPulling="2025-11-25 10:03:39.013257842 +0000 UTC m=+1167.598230155" observedRunningTime="2025-11-25 10:03:40.629490664 +0000 UTC m=+1169.214462967" watchObservedRunningTime="2025-11-25 10:03:40.636443475 +0000 UTC m=+1169.221415788" Nov 25 10:03:40 crc kubenswrapper[4769]: I1125 10:03:40.679280 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-pxvh7" podStartSLOduration=6.931279694 podStartE2EDuration="45.679259499s" podCreationTimestamp="2025-11-25 10:02:55 +0000 UTC" firstStartedPulling="2025-11-25 10:03:00.279096177 +0000 UTC m=+1128.864068490" lastFinishedPulling="2025-11-25 10:03:39.027075982 +0000 UTC m=+1167.612048295" observedRunningTime="2025-11-25 10:03:40.671749053 +0000 UTC m=+1169.256721366" watchObservedRunningTime="2025-11-25 10:03:40.679259499 +0000 UTC m=+1169.264231812" Nov 25 10:03:40 crc kubenswrapper[4769]: I1125 10:03:40.709524 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-k2htr" podStartSLOduration=3.525150465 podStartE2EDuration="46.709500776s" podCreationTimestamp="2025-11-25 10:02:54 +0000 UTC" firstStartedPulling="2025-11-25 10:02:56.675257142 +0000 UTC m=+1125.260229455" lastFinishedPulling="2025-11-25 10:03:39.859607453 +0000 UTC m=+1168.444579766" observedRunningTime="2025-11-25 10:03:40.706880297 +0000 UTC m=+1169.291852610" watchObservedRunningTime="2025-11-25 10:03:40.709500776 +0000 UTC m=+1169.294473089" Nov 25 10:03:45 crc kubenswrapper[4769]: I1125 10:03:45.013303 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-2hp9n" Nov 25 10:03:45 crc kubenswrapper[4769]: I1125 10:03:45.088715 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7xcjk" Nov 25 10:03:45 crc kubenswrapper[4769]: I1125 10:03:45.139136 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-k2htr" Nov 25 10:03:45 crc kubenswrapper[4769]: I1125 10:03:45.466303 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-47gs9" Nov 25 10:03:45 crc kubenswrapper[4769]: I1125 10:03:45.507189 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-hh9nc" Nov 25 10:03:45 crc kubenswrapper[4769]: I1125 10:03:45.565545 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-qdjsd" Nov 25 10:03:45 crc kubenswrapper[4769]: I1125 10:03:45.608442 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-fknhz" Nov 25 10:03:45 crc kubenswrapper[4769]: I1125 10:03:45.697719 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-rgn2z" Nov 25 10:03:45 crc kubenswrapper[4769]: I1125 10:03:45.746156 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rcv4n" Nov 25 10:03:45 crc kubenswrapper[4769]: I1125 10:03:45.927357 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-g8xqz" Nov 25 10:03:46 crc kubenswrapper[4769]: I1125 10:03:46.050429 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wssdn" Nov 25 10:03:46 crc kubenswrapper[4769]: I1125 10:03:46.193348 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-54cf759cb9-dcqfc" Nov 25 10:03:46 crc kubenswrapper[4769]: I1125 10:03:46.385117 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-5cb74df96-b25q9" Nov 25 10:03:47 crc kubenswrapper[4769]: E1125 10:03:47.242371 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-bzrbr" podUID="a032414e-4be2-47f7-ac88-3bdec0ccb151" Nov 25 10:03:49 crc kubenswrapper[4769]: I1125 10:03:49.441114 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-pxvh7" Nov 25 10:03:52 crc kubenswrapper[4769]: I1125 10:03:52.559507 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-864885998-rz7fl" event={"ID":"8e7436d0-2ff7-4a11-9ab8-74a91e56de4a","Type":"ContainerStarted","Data":"1b86f4ada3b0122a9ae7cd05945e61760cc2f82de35dd3149d4fe9150e15c4cd"} Nov 25 10:03:52 crc kubenswrapper[4769]: I1125 10:03:52.560658 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-864885998-rz7fl" Nov 25 10:03:52 crc kubenswrapper[4769]: I1125 10:03:52.586259 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-864885998-rz7fl" podStartSLOduration=4.66885155 
podStartE2EDuration="57.586238014s" podCreationTimestamp="2025-11-25 10:02:55 +0000 UTC" firstStartedPulling="2025-11-25 10:02:58.752867258 +0000 UTC m=+1127.337839611" lastFinishedPulling="2025-11-25 10:03:51.670253762 +0000 UTC m=+1180.255226075" observedRunningTime="2025-11-25 10:03:52.578624296 +0000 UTC m=+1181.163596609" watchObservedRunningTime="2025-11-25 10:03:52.586238014 +0000 UTC m=+1181.171210327" Nov 25 10:03:56 crc kubenswrapper[4769]: I1125 10:03:56.338914 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-864885998-rz7fl" Nov 25 10:04:02 crc kubenswrapper[4769]: I1125 10:04:02.674162 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-bzrbr" event={"ID":"a032414e-4be2-47f7-ac88-3bdec0ccb151","Type":"ContainerStarted","Data":"39f483ab6967bf5b12b80bbafec1e9b21fe98906c3ce2a5f1956a4b7f80f92de"} Nov 25 10:04:02 crc kubenswrapper[4769]: I1125 10:04:02.732625 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-bzrbr" podStartSLOduration=4.922392095 podStartE2EDuration="1m7.732591431s" podCreationTimestamp="2025-11-25 10:02:55 +0000 UTC" firstStartedPulling="2025-11-25 10:02:58.90058352 +0000 UTC m=+1127.485555823" lastFinishedPulling="2025-11-25 10:04:01.710782806 +0000 UTC m=+1190.295755159" observedRunningTime="2025-11-25 10:04:02.69603388 +0000 UTC m=+1191.281006213" watchObservedRunningTime="2025-11-25 10:04:02.732591431 +0000 UTC m=+1191.317563744" Nov 25 10:04:20 crc kubenswrapper[4769]: I1125 10:04:20.087772 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-jldwl"] Nov 25 10:04:20 crc kubenswrapper[4769]: I1125 10:04:20.092047 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-jldwl" Nov 25 10:04:20 crc kubenswrapper[4769]: I1125 10:04:20.096063 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Nov 25 10:04:20 crc kubenswrapper[4769]: I1125 10:04:20.096301 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Nov 25 10:04:20 crc kubenswrapper[4769]: I1125 10:04:20.096520 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-px795" Nov 25 10:04:20 crc kubenswrapper[4769]: I1125 10:04:20.102509 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-jldwl"] Nov 25 10:04:20 crc kubenswrapper[4769]: I1125 10:04:20.119088 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Nov 25 10:04:20 crc kubenswrapper[4769]: I1125 10:04:20.156850 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-5dk5k"] Nov 25 10:04:20 crc kubenswrapper[4769]: I1125 10:04:20.158627 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-5dk5k" Nov 25 10:04:20 crc kubenswrapper[4769]: I1125 10:04:20.165192 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Nov 25 10:04:20 crc kubenswrapper[4769]: I1125 10:04:20.167004 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-5dk5k"] Nov 25 10:04:20 crc kubenswrapper[4769]: I1125 10:04:20.200127 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-995fv\" (UniqueName: \"kubernetes.io/projected/ab96ca8c-d498-4f2f-a19c-07cbe5356c21-kube-api-access-995fv\") pod \"dnsmasq-dns-675f4bcbfc-jldwl\" (UID: \"ab96ca8c-d498-4f2f-a19c-07cbe5356c21\") " pod="openstack/dnsmasq-dns-675f4bcbfc-jldwl" Nov 25 10:04:20 crc kubenswrapper[4769]: I1125 10:04:20.200215 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab96ca8c-d498-4f2f-a19c-07cbe5356c21-config\") pod \"dnsmasq-dns-675f4bcbfc-jldwl\" (UID: \"ab96ca8c-d498-4f2f-a19c-07cbe5356c21\") " pod="openstack/dnsmasq-dns-675f4bcbfc-jldwl" Nov 25 10:04:20 crc kubenswrapper[4769]: I1125 10:04:20.200251 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c81cebb9-bdc4-48e9-aa6e-379e9a306c33-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-5dk5k\" (UID: \"c81cebb9-bdc4-48e9-aa6e-379e9a306c33\") " pod="openstack/dnsmasq-dns-78dd6ddcc-5dk5k" Nov 25 10:04:20 crc kubenswrapper[4769]: I1125 10:04:20.200289 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kx55d\" (UniqueName: \"kubernetes.io/projected/c81cebb9-bdc4-48e9-aa6e-379e9a306c33-kube-api-access-kx55d\") pod \"dnsmasq-dns-78dd6ddcc-5dk5k\" (UID: \"c81cebb9-bdc4-48e9-aa6e-379e9a306c33\") " pod="openstack/dnsmasq-dns-78dd6ddcc-5dk5k" Nov 25 10:04:20 crc kubenswrapper[4769]: I1125 10:04:20.200401 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c81cebb9-bdc4-48e9-aa6e-379e9a306c33-config\") pod \"dnsmasq-dns-78dd6ddcc-5dk5k\" (UID: \"c81cebb9-bdc4-48e9-aa6e-379e9a306c33\") " pod="openstack/dnsmasq-dns-78dd6ddcc-5dk5k" Nov 25 10:04:20 crc kubenswrapper[4769]: I1125 10:04:20.301833 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c81cebb9-bdc4-48e9-aa6e-379e9a306c33-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-5dk5k\" (UID: \"c81cebb9-bdc4-48e9-aa6e-379e9a306c33\") " pod="openstack/dnsmasq-dns-78dd6ddcc-5dk5k" Nov 25 10:04:20 crc kubenswrapper[4769]: I1125 10:04:20.301977 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kx55d\" (UniqueName: \"kubernetes.io/projected/c81cebb9-bdc4-48e9-aa6e-379e9a306c33-kube-api-access-kx55d\") pod \"dnsmasq-dns-78dd6ddcc-5dk5k\" (UID: \"c81cebb9-bdc4-48e9-aa6e-379e9a306c33\") " pod="openstack/dnsmasq-dns-78dd6ddcc-5dk5k" Nov 25 10:04:20 crc kubenswrapper[4769]: I1125 10:04:20.302035 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c81cebb9-bdc4-48e9-aa6e-379e9a306c33-config\") pod \"dnsmasq-dns-78dd6ddcc-5dk5k\" (UID: \"c81cebb9-bdc4-48e9-aa6e-379e9a306c33\") " pod="openstack/dnsmasq-dns-78dd6ddcc-5dk5k" Nov 25 
10:04:20 crc kubenswrapper[4769]: I1125 10:04:20.302137 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-995fv\" (UniqueName: \"kubernetes.io/projected/ab96ca8c-d498-4f2f-a19c-07cbe5356c21-kube-api-access-995fv\") pod \"dnsmasq-dns-675f4bcbfc-jldwl\" (UID: \"ab96ca8c-d498-4f2f-a19c-07cbe5356c21\") " pod="openstack/dnsmasq-dns-675f4bcbfc-jldwl" Nov 25 10:04:20 crc kubenswrapper[4769]: I1125 10:04:20.302234 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab96ca8c-d498-4f2f-a19c-07cbe5356c21-config\") pod \"dnsmasq-dns-675f4bcbfc-jldwl\" (UID: \"ab96ca8c-d498-4f2f-a19c-07cbe5356c21\") " pod="openstack/dnsmasq-dns-675f4bcbfc-jldwl" Nov 25 10:04:20 crc kubenswrapper[4769]: I1125 10:04:20.303063 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c81cebb9-bdc4-48e9-aa6e-379e9a306c33-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-5dk5k\" (UID: \"c81cebb9-bdc4-48e9-aa6e-379e9a306c33\") " pod="openstack/dnsmasq-dns-78dd6ddcc-5dk5k" Nov 25 10:04:20 crc kubenswrapper[4769]: I1125 10:04:20.303102 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c81cebb9-bdc4-48e9-aa6e-379e9a306c33-config\") pod \"dnsmasq-dns-78dd6ddcc-5dk5k\" (UID: \"c81cebb9-bdc4-48e9-aa6e-379e9a306c33\") " pod="openstack/dnsmasq-dns-78dd6ddcc-5dk5k" Nov 25 10:04:20 crc kubenswrapper[4769]: I1125 10:04:20.303238 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab96ca8c-d498-4f2f-a19c-07cbe5356c21-config\") pod \"dnsmasq-dns-675f4bcbfc-jldwl\" (UID: \"ab96ca8c-d498-4f2f-a19c-07cbe5356c21\") " pod="openstack/dnsmasq-dns-675f4bcbfc-jldwl" Nov 25 10:04:20 crc kubenswrapper[4769]: I1125 10:04:20.327987 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kx55d\" (UniqueName: \"kubernetes.io/projected/c81cebb9-bdc4-48e9-aa6e-379e9a306c33-kube-api-access-kx55d\") pod \"dnsmasq-dns-78dd6ddcc-5dk5k\" (UID: \"c81cebb9-bdc4-48e9-aa6e-379e9a306c33\") " pod="openstack/dnsmasq-dns-78dd6ddcc-5dk5k" Nov 25 10:04:20 crc kubenswrapper[4769]: I1125 10:04:20.330786 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-995fv\" (UniqueName: \"kubernetes.io/projected/ab96ca8c-d498-4f2f-a19c-07cbe5356c21-kube-api-access-995fv\") pod \"dnsmasq-dns-675f4bcbfc-jldwl\" (UID: \"ab96ca8c-d498-4f2f-a19c-07cbe5356c21\") " pod="openstack/dnsmasq-dns-675f4bcbfc-jldwl" Nov 25 10:04:20 crc kubenswrapper[4769]: I1125 10:04:20.413079 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-jldwl" Nov 25 10:04:20 crc kubenswrapper[4769]: I1125 10:04:20.482841 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-5dk5k" Nov 25 10:04:20 crc kubenswrapper[4769]: I1125 10:04:20.959059 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-jldwl"] Nov 25 10:04:20 crc kubenswrapper[4769]: W1125 10:04:20.962791 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podab96ca8c_d498_4f2f_a19c_07cbe5356c21.slice/crio-b48cea0c55c337a6c81d31173cec0e314cffda1c155dead024042c52ec31825d WatchSource:0}: Error finding container b48cea0c55c337a6c81d31173cec0e314cffda1c155dead024042c52ec31825d: Status 404 returned error can't find the container with id b48cea0c55c337a6c81d31173cec0e314cffda1c155dead024042c52ec31825d Nov 25 10:04:21 crc kubenswrapper[4769]: I1125 10:04:21.072132 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-5dk5k"] Nov 25 10:04:21 crc kubenswrapper[4769]: W1125 10:04:21.079086 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc81cebb9_bdc4_48e9_aa6e_379e9a306c33.slice/crio-a4435bfe219190e8eba28d9d9943de182b114744ca8bb5efc697d807e13b9c5e WatchSource:0}: Error finding container a4435bfe219190e8eba28d9d9943de182b114744ca8bb5efc697d807e13b9c5e: Status 404 returned error can't find the container with id a4435bfe219190e8eba28d9d9943de182b114744ca8bb5efc697d807e13b9c5e Nov 25 10:04:21 crc kubenswrapper[4769]: I1125 10:04:21.926144 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-jldwl" event={"ID":"ab96ca8c-d498-4f2f-a19c-07cbe5356c21","Type":"ContainerStarted","Data":"b48cea0c55c337a6c81d31173cec0e314cffda1c155dead024042c52ec31825d"} Nov 25 10:04:21 crc kubenswrapper[4769]: I1125 10:04:21.928254 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-5dk5k" event={"ID":"c81cebb9-bdc4-48e9-aa6e-379e9a306c33","Type":"ContainerStarted","Data":"a4435bfe219190e8eba28d9d9943de182b114744ca8bb5efc697d807e13b9c5e"} Nov 25 10:04:23 crc kubenswrapper[4769]: I1125 10:04:23.199903 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-jldwl"] Nov 25 10:04:23 crc kubenswrapper[4769]: I1125 10:04:23.238779 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-9vg8m"] Nov 25 10:04:23 crc kubenswrapper[4769]: I1125 10:04:23.240518 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-9vg8m" Nov 25 10:04:23 crc kubenswrapper[4769]: I1125 10:04:23.260321 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-9vg8m"] Nov 25 10:04:23 crc kubenswrapper[4769]: I1125 10:04:23.276871 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50d6488a-84f8-41e0-b971-49822125edf2-config\") pod \"dnsmasq-dns-666b6646f7-9vg8m\" (UID: \"50d6488a-84f8-41e0-b971-49822125edf2\") " pod="openstack/dnsmasq-dns-666b6646f7-9vg8m" Nov 25 10:04:23 crc kubenswrapper[4769]: I1125 10:04:23.276951 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ssrvf\" (UniqueName: \"kubernetes.io/projected/50d6488a-84f8-41e0-b971-49822125edf2-kube-api-access-ssrvf\") pod \"dnsmasq-dns-666b6646f7-9vg8m\" (UID: \"50d6488a-84f8-41e0-b971-49822125edf2\") " pod="openstack/dnsmasq-dns-666b6646f7-9vg8m" Nov 25 10:04:23 crc kubenswrapper[4769]: I1125 10:04:23.277034 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/50d6488a-84f8-41e0-b971-49822125edf2-dns-svc\") pod \"dnsmasq-dns-666b6646f7-9vg8m\" (UID: \"50d6488a-84f8-41e0-b971-49822125edf2\") " pod="openstack/dnsmasq-dns-666b6646f7-9vg8m" Nov 25 10:04:23 crc kubenswrapper[4769]: I1125 10:04:23.379771 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50d6488a-84f8-41e0-b971-49822125edf2-config\") pod \"dnsmasq-dns-666b6646f7-9vg8m\" (UID: \"50d6488a-84f8-41e0-b971-49822125edf2\") " pod="openstack/dnsmasq-dns-666b6646f7-9vg8m" Nov 25 10:04:23 crc kubenswrapper[4769]: I1125 10:04:23.379861 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ssrvf\" (UniqueName: \"kubernetes.io/projected/50d6488a-84f8-41e0-b971-49822125edf2-kube-api-access-ssrvf\") pod \"dnsmasq-dns-666b6646f7-9vg8m\" (UID: \"50d6488a-84f8-41e0-b971-49822125edf2\") " pod="openstack/dnsmasq-dns-666b6646f7-9vg8m" Nov 25 10:04:23 crc kubenswrapper[4769]: I1125 10:04:23.379944 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/50d6488a-84f8-41e0-b971-49822125edf2-dns-svc\") pod \"dnsmasq-dns-666b6646f7-9vg8m\" (UID: \"50d6488a-84f8-41e0-b971-49822125edf2\") " pod="openstack/dnsmasq-dns-666b6646f7-9vg8m" Nov 25 10:04:23 crc kubenswrapper[4769]: I1125 10:04:23.381324 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/50d6488a-84f8-41e0-b971-49822125edf2-dns-svc\") pod \"dnsmasq-dns-666b6646f7-9vg8m\" (UID: \"50d6488a-84f8-41e0-b971-49822125edf2\") " pod="openstack/dnsmasq-dns-666b6646f7-9vg8m" Nov 25 10:04:23 crc kubenswrapper[4769]: I1125 10:04:23.386610 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50d6488a-84f8-41e0-b971-49822125edf2-config\") pod \"dnsmasq-dns-666b6646f7-9vg8m\" (UID: \"50d6488a-84f8-41e0-b971-49822125edf2\") " pod="openstack/dnsmasq-dns-666b6646f7-9vg8m" Nov 25 10:04:23 crc kubenswrapper[4769]: I1125 10:04:23.406436 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ssrvf\" (UniqueName: 
\"kubernetes.io/projected/50d6488a-84f8-41e0-b971-49822125edf2-kube-api-access-ssrvf\") pod \"dnsmasq-dns-666b6646f7-9vg8m\" (UID: \"50d6488a-84f8-41e0-b971-49822125edf2\") " pod="openstack/dnsmasq-dns-666b6646f7-9vg8m" Nov 25 10:04:23 crc kubenswrapper[4769]: I1125 10:04:23.574048 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-5dk5k"] Nov 25 10:04:23 crc kubenswrapper[4769]: I1125 10:04:23.574252 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-9vg8m" Nov 25 10:04:23 crc kubenswrapper[4769]: I1125 10:04:23.616159 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-6cxlg"] Nov 25 10:04:23 crc kubenswrapper[4769]: I1125 10:04:23.618104 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-6cxlg" Nov 25 10:04:23 crc kubenswrapper[4769]: I1125 10:04:23.644007 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-6cxlg"] Nov 25 10:04:23 crc kubenswrapper[4769]: I1125 10:04:23.686337 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f1ce4931-1e74-4600-87e2-5fb7c899376a-config\") pod \"dnsmasq-dns-57d769cc4f-6cxlg\" (UID: \"f1ce4931-1e74-4600-87e2-5fb7c899376a\") " pod="openstack/dnsmasq-dns-57d769cc4f-6cxlg" Nov 25 10:04:23 crc kubenswrapper[4769]: I1125 10:04:23.686594 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xsq2d\" (UniqueName: \"kubernetes.io/projected/f1ce4931-1e74-4600-87e2-5fb7c899376a-kube-api-access-xsq2d\") pod \"dnsmasq-dns-57d769cc4f-6cxlg\" (UID: \"f1ce4931-1e74-4600-87e2-5fb7c899376a\") " pod="openstack/dnsmasq-dns-57d769cc4f-6cxlg" Nov 25 10:04:23 crc kubenswrapper[4769]: I1125 10:04:23.686741 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f1ce4931-1e74-4600-87e2-5fb7c899376a-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-6cxlg\" (UID: \"f1ce4931-1e74-4600-87e2-5fb7c899376a\") " pod="openstack/dnsmasq-dns-57d769cc4f-6cxlg" Nov 25 10:04:23 crc kubenswrapper[4769]: I1125 10:04:23.788132 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f1ce4931-1e74-4600-87e2-5fb7c899376a-config\") pod \"dnsmasq-dns-57d769cc4f-6cxlg\" (UID: \"f1ce4931-1e74-4600-87e2-5fb7c899376a\") " pod="openstack/dnsmasq-dns-57d769cc4f-6cxlg" Nov 25 10:04:23 crc kubenswrapper[4769]: I1125 10:04:23.788663 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xsq2d\" (UniqueName: \"kubernetes.io/projected/f1ce4931-1e74-4600-87e2-5fb7c899376a-kube-api-access-xsq2d\") pod \"dnsmasq-dns-57d769cc4f-6cxlg\" (UID: \"f1ce4931-1e74-4600-87e2-5fb7c899376a\") " pod="openstack/dnsmasq-dns-57d769cc4f-6cxlg" Nov 25 10:04:23 crc kubenswrapper[4769]: I1125 10:04:23.788713 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f1ce4931-1e74-4600-87e2-5fb7c899376a-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-6cxlg\" (UID: \"f1ce4931-1e74-4600-87e2-5fb7c899376a\") " pod="openstack/dnsmasq-dns-57d769cc4f-6cxlg" Nov 25 10:04:23 crc kubenswrapper[4769]: I1125 10:04:23.791426 4769 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f1ce4931-1e74-4600-87e2-5fb7c899376a-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-6cxlg\" (UID: \"f1ce4931-1e74-4600-87e2-5fb7c899376a\") " pod="openstack/dnsmasq-dns-57d769cc4f-6cxlg" Nov 25 10:04:23 crc kubenswrapper[4769]: I1125 10:04:23.792039 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f1ce4931-1e74-4600-87e2-5fb7c899376a-config\") pod \"dnsmasq-dns-57d769cc4f-6cxlg\" (UID: \"f1ce4931-1e74-4600-87e2-5fb7c899376a\") " pod="openstack/dnsmasq-dns-57d769cc4f-6cxlg" Nov 25 10:04:23 crc kubenswrapper[4769]: I1125 10:04:23.820359 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xsq2d\" (UniqueName: \"kubernetes.io/projected/f1ce4931-1e74-4600-87e2-5fb7c899376a-kube-api-access-xsq2d\") pod \"dnsmasq-dns-57d769cc4f-6cxlg\" (UID: \"f1ce4931-1e74-4600-87e2-5fb7c899376a\") " pod="openstack/dnsmasq-dns-57d769cc4f-6cxlg" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.039650 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-6cxlg" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.223939 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-9vg8m"] Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.383023 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.400413 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.406385 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.406901 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.407205 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-zjtmv" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.407351 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.407483 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.407617 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.408400 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.423095 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.505624 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/981369ae-93f2-4c25-bdea-d3d89686b0d5-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"981369ae-93f2-4c25-bdea-d3d89686b0d5\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.505834 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/981369ae-93f2-4c25-bdea-d3d89686b0d5-server-conf\") pod \"rabbitmq-server-0\" (UID: \"981369ae-93f2-4c25-bdea-d3d89686b0d5\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.505861 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/981369ae-93f2-4c25-bdea-d3d89686b0d5-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"981369ae-93f2-4c25-bdea-d3d89686b0d5\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.505877 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z8pjv\" (UniqueName: \"kubernetes.io/projected/981369ae-93f2-4c25-bdea-d3d89686b0d5-kube-api-access-z8pjv\") pod \"rabbitmq-server-0\" (UID: \"981369ae-93f2-4c25-bdea-d3d89686b0d5\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.506111 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/981369ae-93f2-4c25-bdea-d3d89686b0d5-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"981369ae-93f2-4c25-bdea-d3d89686b0d5\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.506164 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/981369ae-93f2-4c25-bdea-d3d89686b0d5-config-data\") pod \"rabbitmq-server-0\" (UID: \"981369ae-93f2-4c25-bdea-d3d89686b0d5\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.506191 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"981369ae-93f2-4c25-bdea-d3d89686b0d5\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.506215 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/981369ae-93f2-4c25-bdea-d3d89686b0d5-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"981369ae-93f2-4c25-bdea-d3d89686b0d5\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.506233 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/981369ae-93f2-4c25-bdea-d3d89686b0d5-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"981369ae-93f2-4c25-bdea-d3d89686b0d5\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.506268 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/981369ae-93f2-4c25-bdea-d3d89686b0d5-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"981369ae-93f2-4c25-bdea-d3d89686b0d5\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.506289 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: 
\"kubernetes.io/downward-api/981369ae-93f2-4c25-bdea-d3d89686b0d5-pod-info\") pod \"rabbitmq-server-0\" (UID: \"981369ae-93f2-4c25-bdea-d3d89686b0d5\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.607597 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/981369ae-93f2-4c25-bdea-d3d89686b0d5-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"981369ae-93f2-4c25-bdea-d3d89686b0d5\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.607682 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/981369ae-93f2-4c25-bdea-d3d89686b0d5-config-data\") pod \"rabbitmq-server-0\" (UID: \"981369ae-93f2-4c25-bdea-d3d89686b0d5\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.607719 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"981369ae-93f2-4c25-bdea-d3d89686b0d5\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.607739 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/981369ae-93f2-4c25-bdea-d3d89686b0d5-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"981369ae-93f2-4c25-bdea-d3d89686b0d5\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.607755 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/981369ae-93f2-4c25-bdea-d3d89686b0d5-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"981369ae-93f2-4c25-bdea-d3d89686b0d5\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.607801 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/981369ae-93f2-4c25-bdea-d3d89686b0d5-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"981369ae-93f2-4c25-bdea-d3d89686b0d5\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.607824 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/981369ae-93f2-4c25-bdea-d3d89686b0d5-pod-info\") pod \"rabbitmq-server-0\" (UID: \"981369ae-93f2-4c25-bdea-d3d89686b0d5\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.607858 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/981369ae-93f2-4c25-bdea-d3d89686b0d5-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"981369ae-93f2-4c25-bdea-d3d89686b0d5\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.607886 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/981369ae-93f2-4c25-bdea-d3d89686b0d5-server-conf\") pod \"rabbitmq-server-0\" (UID: \"981369ae-93f2-4c25-bdea-d3d89686b0d5\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.607906 4769 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/981369ae-93f2-4c25-bdea-d3d89686b0d5-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"981369ae-93f2-4c25-bdea-d3d89686b0d5\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.607988 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z8pjv\" (UniqueName: \"kubernetes.io/projected/981369ae-93f2-4c25-bdea-d3d89686b0d5-kube-api-access-z8pjv\") pod \"rabbitmq-server-0\" (UID: \"981369ae-93f2-4c25-bdea-d3d89686b0d5\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.609221 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/981369ae-93f2-4c25-bdea-d3d89686b0d5-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"981369ae-93f2-4c25-bdea-d3d89686b0d5\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.609864 4769 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"981369ae-93f2-4c25-bdea-d3d89686b0d5\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/rabbitmq-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.610064 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/981369ae-93f2-4c25-bdea-d3d89686b0d5-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"981369ae-93f2-4c25-bdea-d3d89686b0d5\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.610798 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/981369ae-93f2-4c25-bdea-d3d89686b0d5-config-data\") pod \"rabbitmq-server-0\" (UID: \"981369ae-93f2-4c25-bdea-d3d89686b0d5\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.611458 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/981369ae-93f2-4c25-bdea-d3d89686b0d5-server-conf\") pod \"rabbitmq-server-0\" (UID: \"981369ae-93f2-4c25-bdea-d3d89686b0d5\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.612232 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/981369ae-93f2-4c25-bdea-d3d89686b0d5-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"981369ae-93f2-4c25-bdea-d3d89686b0d5\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.618989 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/981369ae-93f2-4c25-bdea-d3d89686b0d5-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"981369ae-93f2-4c25-bdea-d3d89686b0d5\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.620492 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/981369ae-93f2-4c25-bdea-d3d89686b0d5-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"981369ae-93f2-4c25-bdea-d3d89686b0d5\") " 
pod="openstack/rabbitmq-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.620572 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/981369ae-93f2-4c25-bdea-d3d89686b0d5-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"981369ae-93f2-4c25-bdea-d3d89686b0d5\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.630751 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/981369ae-93f2-4c25-bdea-d3d89686b0d5-pod-info\") pod \"rabbitmq-server-0\" (UID: \"981369ae-93f2-4c25-bdea-d3d89686b0d5\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.631884 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z8pjv\" (UniqueName: \"kubernetes.io/projected/981369ae-93f2-4c25-bdea-d3d89686b0d5-kube-api-access-z8pjv\") pod \"rabbitmq-server-0\" (UID: \"981369ae-93f2-4c25-bdea-d3d89686b0d5\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.658866 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"981369ae-93f2-4c25-bdea-d3d89686b0d5\") " pod="openstack/rabbitmq-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.708525 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-6cxlg"] Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.754613 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.770026 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.783241 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.784225 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.784423 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.787471 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-9tnjw" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.788675 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.784417 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.790402 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.801330 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.806345 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.916883 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4857aaf8-4133-4c20-bc8c-d4d195091176-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"4857aaf8-4133-4c20-bc8c-d4d195091176\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.917019 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4857aaf8-4133-4c20-bc8c-d4d195091176-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"4857aaf8-4133-4c20-bc8c-d4d195091176\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.917065 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4857aaf8-4133-4c20-bc8c-d4d195091176-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"4857aaf8-4133-4c20-bc8c-d4d195091176\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.917104 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4857aaf8-4133-4c20-bc8c-d4d195091176-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"4857aaf8-4133-4c20-bc8c-d4d195091176\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.917124 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4857aaf8-4133-4c20-bc8c-d4d195091176-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"4857aaf8-4133-4c20-bc8c-d4d195091176\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.917165 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"4857aaf8-4133-4c20-bc8c-d4d195091176\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.917193 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4857aaf8-4133-4c20-bc8c-d4d195091176-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"4857aaf8-4133-4c20-bc8c-d4d195091176\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.917251 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d8znv\" (UniqueName: \"kubernetes.io/projected/4857aaf8-4133-4c20-bc8c-d4d195091176-kube-api-access-d8znv\") pod \"rabbitmq-cell1-server-0\" (UID: \"4857aaf8-4133-4c20-bc8c-d4d195091176\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.917306 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4857aaf8-4133-4c20-bc8c-d4d195091176-plugins-conf\") pod 
\"rabbitmq-cell1-server-0\" (UID: \"4857aaf8-4133-4c20-bc8c-d4d195091176\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.917334 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4857aaf8-4133-4c20-bc8c-d4d195091176-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"4857aaf8-4133-4c20-bc8c-d4d195091176\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.917397 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4857aaf8-4133-4c20-bc8c-d4d195091176-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"4857aaf8-4133-4c20-bc8c-d4d195091176\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.972128 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-6cxlg" event={"ID":"f1ce4931-1e74-4600-87e2-5fb7c899376a","Type":"ContainerStarted","Data":"c9061c6badddd8fd3f81eaf3a44f1b95246c968fd696a583d0f5f818b1f328fa"} Nov 25 10:04:24 crc kubenswrapper[4769]: I1125 10:04:24.973857 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-9vg8m" event={"ID":"50d6488a-84f8-41e0-b971-49822125edf2","Type":"ContainerStarted","Data":"cf688d93473a051cc9829bf7a796a24d3d750124eeabb5b84898936a34a74818"} Nov 25 10:04:25 crc kubenswrapper[4769]: I1125 10:04:25.019045 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4857aaf8-4133-4c20-bc8c-d4d195091176-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"4857aaf8-4133-4c20-bc8c-d4d195091176\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:04:25 crc kubenswrapper[4769]: I1125 10:04:25.019094 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4857aaf8-4133-4c20-bc8c-d4d195091176-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"4857aaf8-4133-4c20-bc8c-d4d195091176\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:04:25 crc kubenswrapper[4769]: I1125 10:04:25.019129 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"4857aaf8-4133-4c20-bc8c-d4d195091176\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:04:25 crc kubenswrapper[4769]: I1125 10:04:25.019167 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4857aaf8-4133-4c20-bc8c-d4d195091176-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"4857aaf8-4133-4c20-bc8c-d4d195091176\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:04:25 crc kubenswrapper[4769]: I1125 10:04:25.019204 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d8znv\" (UniqueName: \"kubernetes.io/projected/4857aaf8-4133-4c20-bc8c-d4d195091176-kube-api-access-d8znv\") pod \"rabbitmq-cell1-server-0\" (UID: \"4857aaf8-4133-4c20-bc8c-d4d195091176\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:04:25 crc kubenswrapper[4769]: I1125 10:04:25.019244 4769 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4857aaf8-4133-4c20-bc8c-d4d195091176-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"4857aaf8-4133-4c20-bc8c-d4d195091176\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:04:25 crc kubenswrapper[4769]: I1125 10:04:25.019271 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4857aaf8-4133-4c20-bc8c-d4d195091176-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"4857aaf8-4133-4c20-bc8c-d4d195091176\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:04:25 crc kubenswrapper[4769]: I1125 10:04:25.019317 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4857aaf8-4133-4c20-bc8c-d4d195091176-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"4857aaf8-4133-4c20-bc8c-d4d195091176\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:04:25 crc kubenswrapper[4769]: I1125 10:04:25.019343 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4857aaf8-4133-4c20-bc8c-d4d195091176-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"4857aaf8-4133-4c20-bc8c-d4d195091176\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:04:25 crc kubenswrapper[4769]: I1125 10:04:25.019399 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4857aaf8-4133-4c20-bc8c-d4d195091176-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"4857aaf8-4133-4c20-bc8c-d4d195091176\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:04:25 crc kubenswrapper[4769]: I1125 10:04:25.019421 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4857aaf8-4133-4c20-bc8c-d4d195091176-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"4857aaf8-4133-4c20-bc8c-d4d195091176\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:04:25 crc kubenswrapper[4769]: I1125 10:04:25.019823 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4857aaf8-4133-4c20-bc8c-d4d195091176-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"4857aaf8-4133-4c20-bc8c-d4d195091176\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:04:25 crc kubenswrapper[4769]: I1125 10:04:25.019845 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4857aaf8-4133-4c20-bc8c-d4d195091176-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"4857aaf8-4133-4c20-bc8c-d4d195091176\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:04:25 crc kubenswrapper[4769]: I1125 10:04:25.020860 4769 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"4857aaf8-4133-4c20-bc8c-d4d195091176\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:04:25 crc kubenswrapper[4769]: I1125 10:04:25.021478 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4857aaf8-4133-4c20-bc8c-d4d195091176-plugins-conf\") pod 
\"rabbitmq-cell1-server-0\" (UID: \"4857aaf8-4133-4c20-bc8c-d4d195091176\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:04:25 crc kubenswrapper[4769]: I1125 10:04:25.021732 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4857aaf8-4133-4c20-bc8c-d4d195091176-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"4857aaf8-4133-4c20-bc8c-d4d195091176\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:04:25 crc kubenswrapper[4769]: I1125 10:04:25.022750 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4857aaf8-4133-4c20-bc8c-d4d195091176-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"4857aaf8-4133-4c20-bc8c-d4d195091176\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:04:25 crc kubenswrapper[4769]: I1125 10:04:25.026231 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4857aaf8-4133-4c20-bc8c-d4d195091176-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"4857aaf8-4133-4c20-bc8c-d4d195091176\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:04:25 crc kubenswrapper[4769]: I1125 10:04:25.029299 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4857aaf8-4133-4c20-bc8c-d4d195091176-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"4857aaf8-4133-4c20-bc8c-d4d195091176\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:04:25 crc kubenswrapper[4769]: I1125 10:04:25.034316 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4857aaf8-4133-4c20-bc8c-d4d195091176-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"4857aaf8-4133-4c20-bc8c-d4d195091176\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:04:25 crc kubenswrapper[4769]: I1125 10:04:25.036701 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4857aaf8-4133-4c20-bc8c-d4d195091176-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"4857aaf8-4133-4c20-bc8c-d4d195091176\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:04:25 crc kubenswrapper[4769]: I1125 10:04:25.049015 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d8znv\" (UniqueName: \"kubernetes.io/projected/4857aaf8-4133-4c20-bc8c-d4d195091176-kube-api-access-d8znv\") pod \"rabbitmq-cell1-server-0\" (UID: \"4857aaf8-4133-4c20-bc8c-d4d195091176\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:04:25 crc kubenswrapper[4769]: I1125 10:04:25.055956 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"4857aaf8-4133-4c20-bc8c-d4d195091176\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:04:25 crc kubenswrapper[4769]: I1125 10:04:25.106387 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:04:25 crc kubenswrapper[4769]: I1125 10:04:25.378072 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 10:04:25 crc kubenswrapper[4769]: W1125 10:04:25.395497 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod981369ae_93f2_4c25_bdea_d3d89686b0d5.slice/crio-91e89b078c5be26a12c4bd313bea66caccf2d4a847c3cf136dc471af97493c30 WatchSource:0}: Error finding container 91e89b078c5be26a12c4bd313bea66caccf2d4a847c3cf136dc471af97493c30: Status 404 returned error can't find the container with id 91e89b078c5be26a12c4bd313bea66caccf2d4a847c3cf136dc471af97493c30 Nov 25 10:04:25 crc kubenswrapper[4769]: I1125 10:04:25.706469 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 10:04:25 crc kubenswrapper[4769]: W1125 10:04:25.716642 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4857aaf8_4133_4c20_bc8c_d4d195091176.slice/crio-f40e71ab65d74e5d0367494f2a420b267a77505c2df1aa23eeef6dabf10272d8 WatchSource:0}: Error finding container f40e71ab65d74e5d0367494f2a420b267a77505c2df1aa23eeef6dabf10272d8: Status 404 returned error can't find the container with id f40e71ab65d74e5d0367494f2a420b267a77505c2df1aa23eeef6dabf10272d8 Nov 25 10:04:25 crc kubenswrapper[4769]: I1125 10:04:25.985375 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"4857aaf8-4133-4c20-bc8c-d4d195091176","Type":"ContainerStarted","Data":"f40e71ab65d74e5d0367494f2a420b267a77505c2df1aa23eeef6dabf10272d8"} Nov 25 10:04:25 crc kubenswrapper[4769]: I1125 10:04:25.987174 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"981369ae-93f2-4c25-bdea-d3d89686b0d5","Type":"ContainerStarted","Data":"91e89b078c5be26a12c4bd313bea66caccf2d4a847c3cf136dc471af97493c30"} Nov 25 10:04:26 crc kubenswrapper[4769]: I1125 10:04:26.139735 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Nov 25 10:04:26 crc kubenswrapper[4769]: I1125 10:04:26.142128 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0"
Nov 25 10:04:26 crc kubenswrapper[4769]: I1125 10:04:26.150149 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data"
Nov 25 10:04:26 crc kubenswrapper[4769]: I1125 10:04:26.155094 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-k46pn"
Nov 25 10:04:26 crc kubenswrapper[4769]: I1125 10:04:26.156491 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"]
Nov 25 10:04:26 crc kubenswrapper[4769]: I1125 10:04:26.157090 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle"
Nov 25 10:04:26 crc kubenswrapper[4769]: I1125 10:04:26.161304 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts"
Nov 25 10:04:26 crc kubenswrapper[4769]: I1125 10:04:26.174925 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc"
Nov 25 10:04:26 crc kubenswrapper[4769]: I1125 10:04:26.268994 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52438bf8-8800-4078-bc88-63033a83dd2e-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"52438bf8-8800-4078-bc88-63033a83dd2e\") " pod="openstack/openstack-galera-0"
Nov 25 10:04:26 crc kubenswrapper[4769]: I1125 10:04:26.269071 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-galera-0\" (UID: \"52438bf8-8800-4078-bc88-63033a83dd2e\") " pod="openstack/openstack-galera-0"
Nov 25 10:04:26 crc kubenswrapper[4769]: I1125 10:04:26.269176 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/52438bf8-8800-4078-bc88-63033a83dd2e-config-data-generated\") pod \"openstack-galera-0\" (UID: \"52438bf8-8800-4078-bc88-63033a83dd2e\") " pod="openstack/openstack-galera-0"
Nov 25 10:04:26 crc kubenswrapper[4769]: I1125 10:04:26.269240 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/52438bf8-8800-4078-bc88-63033a83dd2e-operator-scripts\") pod \"openstack-galera-0\" (UID: \"52438bf8-8800-4078-bc88-63033a83dd2e\") " pod="openstack/openstack-galera-0"
Nov 25 10:04:26 crc kubenswrapper[4769]: I1125 10:04:26.269319 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/52438bf8-8800-4078-bc88-63033a83dd2e-config-data-default\") pod \"openstack-galera-0\" (UID: \"52438bf8-8800-4078-bc88-63033a83dd2e\") " pod="openstack/openstack-galera-0"
Nov 25 10:04:26 crc kubenswrapper[4769]: I1125 10:04:26.269357 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jk46w\" (UniqueName: \"kubernetes.io/projected/52438bf8-8800-4078-bc88-63033a83dd2e-kube-api-access-jk46w\") pod \"openstack-galera-0\" (UID: \"52438bf8-8800-4078-bc88-63033a83dd2e\") " pod="openstack/openstack-galera-0"
Nov 25 10:04:26 crc kubenswrapper[4769]: I1125 10:04:26.269414 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/52438bf8-8800-4078-bc88-63033a83dd2e-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"52438bf8-8800-4078-bc88-63033a83dd2e\") " pod="openstack/openstack-galera-0"
Nov 25 10:04:26 crc kubenswrapper[4769]: I1125 10:04:26.269447 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/52438bf8-8800-4078-bc88-63033a83dd2e-kolla-config\") pod \"openstack-galera-0\" (UID: \"52438bf8-8800-4078-bc88-63033a83dd2e\") " pod="openstack/openstack-galera-0"
Nov 25 10:04:26 crc kubenswrapper[4769]: I1125 10:04:26.377250 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52438bf8-8800-4078-bc88-63033a83dd2e-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"52438bf8-8800-4078-bc88-63033a83dd2e\") " pod="openstack/openstack-galera-0"
Nov 25 10:04:26 crc kubenswrapper[4769]: I1125 10:04:26.377478 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-galera-0\" (UID: \"52438bf8-8800-4078-bc88-63033a83dd2e\") " pod="openstack/openstack-galera-0"
Nov 25 10:04:26 crc kubenswrapper[4769]: I1125 10:04:26.377546 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/52438bf8-8800-4078-bc88-63033a83dd2e-config-data-generated\") pod \"openstack-galera-0\" (UID: \"52438bf8-8800-4078-bc88-63033a83dd2e\") " pod="openstack/openstack-galera-0"
Nov 25 10:04:26 crc kubenswrapper[4769]: I1125 10:04:26.377638 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/52438bf8-8800-4078-bc88-63033a83dd2e-operator-scripts\") pod \"openstack-galera-0\" (UID: \"52438bf8-8800-4078-bc88-63033a83dd2e\") " pod="openstack/openstack-galera-0"
Nov 25 10:04:26 crc kubenswrapper[4769]: I1125 10:04:26.377679 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/52438bf8-8800-4078-bc88-63033a83dd2e-config-data-default\") pod \"openstack-galera-0\" (UID: \"52438bf8-8800-4078-bc88-63033a83dd2e\") " pod="openstack/openstack-galera-0"
Nov 25 10:04:26 crc kubenswrapper[4769]: I1125 10:04:26.377745 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jk46w\" (UniqueName: \"kubernetes.io/projected/52438bf8-8800-4078-bc88-63033a83dd2e-kube-api-access-jk46w\") pod \"openstack-galera-0\" (UID: \"52438bf8-8800-4078-bc88-63033a83dd2e\") " pod="openstack/openstack-galera-0"
Nov 25 10:04:26 crc kubenswrapper[4769]: I1125 10:04:26.377769 4769 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-galera-0\" (UID: \"52438bf8-8800-4078-bc88-63033a83dd2e\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/openstack-galera-0"
Nov 25 10:04:26 crc kubenswrapper[4769]: I1125 10:04:26.378806 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/52438bf8-8800-4078-bc88-63033a83dd2e-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"52438bf8-8800-4078-bc88-63033a83dd2e\") " pod="openstack/openstack-galera-0"
Nov 25 10:04:26 crc kubenswrapper[4769]: I1125 10:04:26.378978 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/52438bf8-8800-4078-bc88-63033a83dd2e-kolla-config\") pod \"openstack-galera-0\" (UID: \"52438bf8-8800-4078-bc88-63033a83dd2e\") " pod="openstack/openstack-galera-0"
Nov 25 10:04:26 crc kubenswrapper[4769]: I1125 10:04:26.378990 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/52438bf8-8800-4078-bc88-63033a83dd2e-config-data-generated\") pod \"openstack-galera-0\" (UID: \"52438bf8-8800-4078-bc88-63033a83dd2e\") " pod="openstack/openstack-galera-0"
Nov 25 10:04:26 crc kubenswrapper[4769]: I1125 10:04:26.380149 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/52438bf8-8800-4078-bc88-63033a83dd2e-config-data-default\") pod \"openstack-galera-0\" (UID: \"52438bf8-8800-4078-bc88-63033a83dd2e\") " pod="openstack/openstack-galera-0"
Nov 25 10:04:26 crc kubenswrapper[4769]: I1125 10:04:26.380603 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/52438bf8-8800-4078-bc88-63033a83dd2e-kolla-config\") pod \"openstack-galera-0\" (UID: \"52438bf8-8800-4078-bc88-63033a83dd2e\") " pod="openstack/openstack-galera-0"
Nov 25 10:04:26 crc kubenswrapper[4769]: I1125 10:04:26.380744 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/52438bf8-8800-4078-bc88-63033a83dd2e-operator-scripts\") pod \"openstack-galera-0\" (UID: \"52438bf8-8800-4078-bc88-63033a83dd2e\") " pod="openstack/openstack-galera-0"
Nov 25 10:04:26 crc kubenswrapper[4769]: I1125 10:04:26.393054 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/52438bf8-8800-4078-bc88-63033a83dd2e-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"52438bf8-8800-4078-bc88-63033a83dd2e\") " pod="openstack/openstack-galera-0"
Nov 25 10:04:26 crc kubenswrapper[4769]: I1125 10:04:26.397433 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52438bf8-8800-4078-bc88-63033a83dd2e-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"52438bf8-8800-4078-bc88-63033a83dd2e\") " pod="openstack/openstack-galera-0"
Nov 25 10:04:26 crc kubenswrapper[4769]: I1125 10:04:26.401549 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jk46w\" (UniqueName: \"kubernetes.io/projected/52438bf8-8800-4078-bc88-63033a83dd2e-kube-api-access-jk46w\") pod \"openstack-galera-0\" (UID: \"52438bf8-8800-4078-bc88-63033a83dd2e\") " pod="openstack/openstack-galera-0"
Nov 25 10:04:26 crc kubenswrapper[4769]: I1125 10:04:26.434141 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-galera-0\" (UID: \"52438bf8-8800-4078-bc88-63033a83dd2e\") " pod="openstack/openstack-galera-0"
Nov 25 10:04:26 crc kubenswrapper[4769]: I1125 10:04:26.508867 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0"
Nov 25 10:04:27 crc kubenswrapper[4769]: I1125 10:04:27.691010 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"]
Nov 25 10:04:27 crc kubenswrapper[4769]: I1125 10:04:27.692688 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0"
Nov 25 10:04:27 crc kubenswrapper[4769]: I1125 10:04:27.696872 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc"
Nov 25 10:04:27 crc kubenswrapper[4769]: I1125 10:04:27.697490 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-sw897"
Nov 25 10:04:27 crc kubenswrapper[4769]: I1125 10:04:27.697626 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data"
Nov 25 10:04:27 crc kubenswrapper[4769]: I1125 10:04:27.698310 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts"
Nov 25 10:04:27 crc kubenswrapper[4769]: I1125 10:04:27.731495 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"]
Nov 25 10:04:27 crc kubenswrapper[4769]: I1125 10:04:27.824698 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/8e360825-f56f-4e69-9e17-c9e78f295267-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"8e360825-f56f-4e69-9e17-c9e78f295267\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:04:27 crc kubenswrapper[4769]: I1125 10:04:27.824754 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e360825-f56f-4e69-9e17-c9e78f295267-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"8e360825-f56f-4e69-9e17-c9e78f295267\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:04:27 crc kubenswrapper[4769]: I1125 10:04:27.824786 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/8e360825-f56f-4e69-9e17-c9e78f295267-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"8e360825-f56f-4e69-9e17-c9e78f295267\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:04:27 crc kubenswrapper[4769]: I1125 10:04:27.824820 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vrsv6\" (UniqueName: \"kubernetes.io/projected/8e360825-f56f-4e69-9e17-c9e78f295267-kube-api-access-vrsv6\") pod \"openstack-cell1-galera-0\" (UID: \"8e360825-f56f-4e69-9e17-c9e78f295267\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:04:27 crc kubenswrapper[4769]: I1125 10:04:27.824860 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8e360825-f56f-4e69-9e17-c9e78f295267-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"8e360825-f56f-4e69-9e17-c9e78f295267\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:04:27 crc kubenswrapper[4769]: I1125 10:04:27.824912 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e360825-f56f-4e69-9e17-c9e78f295267-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"8e360825-f56f-4e69-9e17-c9e78f295267\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:04:27 crc kubenswrapper[4769]: I1125 10:04:27.824944 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-cell1-galera-0\" (UID: \"8e360825-f56f-4e69-9e17-c9e78f295267\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:04:27 crc kubenswrapper[4769]: I1125 10:04:27.825004 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/8e360825-f56f-4e69-9e17-c9e78f295267-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"8e360825-f56f-4e69-9e17-c9e78f295267\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:04:27 crc kubenswrapper[4769]: I1125 10:04:27.917036 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"]
Nov 25 10:04:27 crc kubenswrapper[4769]: I1125 10:04:27.918663 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0"
Nov 25 10:04:27 crc kubenswrapper[4769]: I1125 10:04:27.927399 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vrsv6\" (UniqueName: \"kubernetes.io/projected/8e360825-f56f-4e69-9e17-c9e78f295267-kube-api-access-vrsv6\") pod \"openstack-cell1-galera-0\" (UID: \"8e360825-f56f-4e69-9e17-c9e78f295267\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:04:27 crc kubenswrapper[4769]: I1125 10:04:27.929237 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8e360825-f56f-4e69-9e17-c9e78f295267-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"8e360825-f56f-4e69-9e17-c9e78f295267\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:04:27 crc kubenswrapper[4769]: I1125 10:04:27.929674 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e360825-f56f-4e69-9e17-c9e78f295267-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"8e360825-f56f-4e69-9e17-c9e78f295267\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:04:27 crc kubenswrapper[4769]: I1125 10:04:27.929776 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-cell1-galera-0\" (UID: \"8e360825-f56f-4e69-9e17-c9e78f295267\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:04:27 crc kubenswrapper[4769]: I1125 10:04:27.929904 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/8e360825-f56f-4e69-9e17-c9e78f295267-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"8e360825-f56f-4e69-9e17-c9e78f295267\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:04:27 crc kubenswrapper[4769]: I1125 10:04:27.930024 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/8e360825-f56f-4e69-9e17-c9e78f295267-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"8e360825-f56f-4e69-9e17-c9e78f295267\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:04:27 crc kubenswrapper[4769]: I1125 10:04:27.931347 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/8e360825-f56f-4e69-9e17-c9e78f295267-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"8e360825-f56f-4e69-9e17-c9e78f295267\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:04:27 crc kubenswrapper[4769]: I1125 10:04:27.928600 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data"
Nov 25 10:04:27 crc kubenswrapper[4769]: I1125 10:04:27.928657 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc"
Nov 25 10:04:27 crc kubenswrapper[4769]: I1125 10:04:27.928691 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-pldh2"
Nov 25 10:04:27 crc kubenswrapper[4769]: I1125 10:04:27.932542 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e360825-f56f-4e69-9e17-c9e78f295267-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"8e360825-f56f-4e69-9e17-c9e78f295267\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:04:27 crc kubenswrapper[4769]: I1125 10:04:27.932627 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/8e360825-f56f-4e69-9e17-c9e78f295267-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"8e360825-f56f-4e69-9e17-c9e78f295267\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:04:27 crc kubenswrapper[4769]: I1125 10:04:27.933219 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/8e360825-f56f-4e69-9e17-c9e78f295267-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"8e360825-f56f-4e69-9e17-c9e78f295267\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:04:27 crc kubenswrapper[4769]: I1125 10:04:27.936529 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"]
Nov 25 10:04:27 crc kubenswrapper[4769]: I1125 10:04:27.937553 4769 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-cell1-galera-0\" (UID: \"8e360825-f56f-4e69-9e17-c9e78f295267\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/openstack-cell1-galera-0"
Nov 25 10:04:27 crc kubenswrapper[4769]: I1125 10:04:27.948011 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/8e360825-f56f-4e69-9e17-c9e78f295267-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"8e360825-f56f-4e69-9e17-c9e78f295267\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:04:27 crc kubenswrapper[4769]: I1125 10:04:27.948157 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8e360825-f56f-4e69-9e17-c9e78f295267-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"8e360825-f56f-4e69-9e17-c9e78f295267\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:04:28 crc kubenswrapper[4769]: I1125 10:04:28.003909 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e360825-f56f-4e69-9e17-c9e78f295267-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"8e360825-f56f-4e69-9e17-c9e78f295267\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:04:28 crc kubenswrapper[4769]: I1125 10:04:28.011760 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vrsv6\" (UniqueName: \"kubernetes.io/projected/8e360825-f56f-4e69-9e17-c9e78f295267-kube-api-access-vrsv6\") pod \"openstack-cell1-galera-0\" (UID: \"8e360825-f56f-4e69-9e17-c9e78f295267\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:04:28 crc kubenswrapper[4769]: I1125 10:04:28.020331 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e360825-f56f-4e69-9e17-c9e78f295267-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"8e360825-f56f-4e69-9e17-c9e78f295267\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:04:28 crc kubenswrapper[4769]: I1125 10:04:28.030160 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-cell1-galera-0\" (UID: \"8e360825-f56f-4e69-9e17-c9e78f295267\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:04:28 crc kubenswrapper[4769]: I1125 10:04:28.036819 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c646d86-e48c-4d40-8370-1736c484875f-combined-ca-bundle\") pod \"memcached-0\" (UID: \"8c646d86-e48c-4d40-8370-1736c484875f\") " pod="openstack/memcached-0"
Nov 25 10:04:28 crc kubenswrapper[4769]: I1125 10:04:28.036867 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/8c646d86-e48c-4d40-8370-1736c484875f-kolla-config\") pod \"memcached-0\" (UID: \"8c646d86-e48c-4d40-8370-1736c484875f\") " pod="openstack/memcached-0"
Nov 25 10:04:28 crc kubenswrapper[4769]: I1125 10:04:28.036993 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/8c646d86-e48c-4d40-8370-1736c484875f-memcached-tls-certs\") pod \"memcached-0\" (UID: \"8c646d86-e48c-4d40-8370-1736c484875f\") " pod="openstack/memcached-0"
Nov 25 10:04:28 crc kubenswrapper[4769]: I1125 10:04:28.037020 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wx94s\" (UniqueName: \"kubernetes.io/projected/8c646d86-e48c-4d40-8370-1736c484875f-kube-api-access-wx94s\") pod \"memcached-0\" (UID: \"8c646d86-e48c-4d40-8370-1736c484875f\") " pod="openstack/memcached-0"
Nov 25 10:04:28 crc kubenswrapper[4769]: I1125 10:04:28.037067 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8c646d86-e48c-4d40-8370-1736c484875f-config-data\") pod \"memcached-0\" (UID: \"8c646d86-e48c-4d40-8370-1736c484875f\") " pod="openstack/memcached-0"
Nov 25 10:04:28 crc kubenswrapper[4769]: I1125 10:04:28.139352 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8c646d86-e48c-4d40-8370-1736c484875f-config-data\") pod \"memcached-0\" (UID: \"8c646d86-e48c-4d40-8370-1736c484875f\") " pod="openstack/memcached-0"
Nov 25 10:04:28 crc kubenswrapper[4769]: I1125 10:04:28.139466 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c646d86-e48c-4d40-8370-1736c484875f-combined-ca-bundle\") pod \"memcached-0\" (UID: \"8c646d86-e48c-4d40-8370-1736c484875f\") " pod="openstack/memcached-0"
Nov 25 10:04:28 crc kubenswrapper[4769]: I1125 10:04:28.139489 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/8c646d86-e48c-4d40-8370-1736c484875f-kolla-config\") pod \"memcached-0\" (UID: \"8c646d86-e48c-4d40-8370-1736c484875f\") " pod="openstack/memcached-0"
Nov 25 10:04:28 crc kubenswrapper[4769]: I1125 10:04:28.139552 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/8c646d86-e48c-4d40-8370-1736c484875f-memcached-tls-certs\") pod \"memcached-0\" (UID: \"8c646d86-e48c-4d40-8370-1736c484875f\") " pod="openstack/memcached-0"
Nov 25 10:04:28 crc kubenswrapper[4769]: I1125 10:04:28.139572 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wx94s\" (UniqueName: \"kubernetes.io/projected/8c646d86-e48c-4d40-8370-1736c484875f-kube-api-access-wx94s\") pod \"memcached-0\" (UID: \"8c646d86-e48c-4d40-8370-1736c484875f\") " pod="openstack/memcached-0"
Nov 25 10:04:28 crc kubenswrapper[4769]: I1125 10:04:28.141406 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/8c646d86-e48c-4d40-8370-1736c484875f-kolla-config\") pod \"memcached-0\" (UID: \"8c646d86-e48c-4d40-8370-1736c484875f\") " pod="openstack/memcached-0"
Nov 25 10:04:28 crc kubenswrapper[4769]: I1125 10:04:28.141434 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8c646d86-e48c-4d40-8370-1736c484875f-config-data\") pod \"memcached-0\" (UID: \"8c646d86-e48c-4d40-8370-1736c484875f\") " pod="openstack/memcached-0"
Nov 25 10:04:28 crc kubenswrapper[4769]: I1125 10:04:28.148062 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c646d86-e48c-4d40-8370-1736c484875f-combined-ca-bundle\") pod \"memcached-0\" (UID: \"8c646d86-e48c-4d40-8370-1736c484875f\") " pod="openstack/memcached-0"
Nov 25 10:04:28 crc kubenswrapper[4769]: I1125 10:04:28.159945 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wx94s\" (UniqueName: \"kubernetes.io/projected/8c646d86-e48c-4d40-8370-1736c484875f-kube-api-access-wx94s\") pod \"memcached-0\" (UID: \"8c646d86-e48c-4d40-8370-1736c484875f\") " pod="openstack/memcached-0"
Nov 25 10:04:28 crc kubenswrapper[4769]: I1125 10:04:28.160181 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/8c646d86-e48c-4d40-8370-1736c484875f-memcached-tls-certs\") pod \"memcached-0\" (UID: \"8c646d86-e48c-4d40-8370-1736c484875f\") " pod="openstack/memcached-0"
Nov 25 10:04:28 crc kubenswrapper[4769]: I1125 10:04:28.332589 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0"
Nov 25 10:04:28 crc kubenswrapper[4769]: I1125 10:04:28.407293 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0"
Nov 25 10:04:30 crc kubenswrapper[4769]: I1125 10:04:30.223529 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 25 10:04:30 crc kubenswrapper[4769]: I1125 10:04:30.225665 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Nov 25 10:04:30 crc kubenswrapper[4769]: I1125 10:04:30.232387 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-pdvwp"
Nov 25 10:04:30 crc kubenswrapper[4769]: I1125 10:04:30.276508 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 25 10:04:30 crc kubenswrapper[4769]: I1125 10:04:30.300669 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lskqw\" (UniqueName: \"kubernetes.io/projected/75a23be3-741b-4e4e-a168-4cc37f54073d-kube-api-access-lskqw\") pod \"kube-state-metrics-0\" (UID: \"75a23be3-741b-4e4e-a168-4cc37f54073d\") " pod="openstack/kube-state-metrics-0"
Nov 25 10:04:30 crc kubenswrapper[4769]: I1125 10:04:30.404739 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lskqw\" (UniqueName: \"kubernetes.io/projected/75a23be3-741b-4e4e-a168-4cc37f54073d-kube-api-access-lskqw\") pod \"kube-state-metrics-0\" (UID: \"75a23be3-741b-4e4e-a168-4cc37f54073d\") " pod="openstack/kube-state-metrics-0"
Nov 25 10:04:30 crc kubenswrapper[4769]: I1125 10:04:30.481207 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lskqw\" (UniqueName: \"kubernetes.io/projected/75a23be3-741b-4e4e-a168-4cc37f54073d-kube-api-access-lskqw\") pod \"kube-state-metrics-0\" (UID: \"75a23be3-741b-4e4e-a168-4cc37f54073d\") " pod="openstack/kube-state-metrics-0"
Nov 25 10:04:30 crc kubenswrapper[4769]: I1125 10:04:30.589985 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.017500 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-ui-dashboards-7d5fb4cbfb-wrf89"]
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.019071 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-wrf89"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.026465 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-ui-dashboards-sa-dockercfg-9v6jl"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.026481 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-ui-dashboards"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.042097 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-ui-dashboards-7d5fb4cbfb-wrf89"]
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.124826 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dz4t4\" (UniqueName: \"kubernetes.io/projected/cdfffb8b-45ab-4bd4-a29a-b6ad363d9d59-kube-api-access-dz4t4\") pod \"observability-ui-dashboards-7d5fb4cbfb-wrf89\" (UID: \"cdfffb8b-45ab-4bd4-a29a-b6ad363d9d59\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-wrf89"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.124953 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cdfffb8b-45ab-4bd4-a29a-b6ad363d9d59-serving-cert\") pod \"observability-ui-dashboards-7d5fb4cbfb-wrf89\" (UID: \"cdfffb8b-45ab-4bd4-a29a-b6ad363d9d59\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-wrf89"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.226508 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cdfffb8b-45ab-4bd4-a29a-b6ad363d9d59-serving-cert\") pod \"observability-ui-dashboards-7d5fb4cbfb-wrf89\" (UID: \"cdfffb8b-45ab-4bd4-a29a-b6ad363d9d59\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-wrf89"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.226635 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dz4t4\" (UniqueName: \"kubernetes.io/projected/cdfffb8b-45ab-4bd4-a29a-b6ad363d9d59-kube-api-access-dz4t4\") pod \"observability-ui-dashboards-7d5fb4cbfb-wrf89\" (UID: \"cdfffb8b-45ab-4bd4-a29a-b6ad363d9d59\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-wrf89"
Nov 25 10:04:31 crc kubenswrapper[4769]: E1125 10:04:31.227083 4769 secret.go:188] Couldn't get secret openshift-operators/observability-ui-dashboards: secret "observability-ui-dashboards" not found
Nov 25 10:04:31 crc kubenswrapper[4769]: E1125 10:04:31.227135 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cdfffb8b-45ab-4bd4-a29a-b6ad363d9d59-serving-cert podName:cdfffb8b-45ab-4bd4-a29a-b6ad363d9d59 nodeName:}" failed. No retries permitted until 2025-11-25 10:04:31.727116791 +0000 UTC m=+1220.312089104 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/cdfffb8b-45ab-4bd4-a29a-b6ad363d9d59-serving-cert") pod "observability-ui-dashboards-7d5fb4cbfb-wrf89" (UID: "cdfffb8b-45ab-4bd4-a29a-b6ad363d9d59") : secret "observability-ui-dashboards" not found
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.252308 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dz4t4\" (UniqueName: \"kubernetes.io/projected/cdfffb8b-45ab-4bd4-a29a-b6ad363d9d59-kube-api-access-dz4t4\") pod \"observability-ui-dashboards-7d5fb4cbfb-wrf89\" (UID: \"cdfffb8b-45ab-4bd4-a29a-b6ad363d9d59\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-wrf89"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.438383 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-859655bd7d-v8qvf"]
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.445614 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-859655bd7d-v8qvf"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.475460 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-859655bd7d-v8qvf"]
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.534680 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/18f2ba17-1349-4c25-9585-d1f119a6988b-console-oauth-config\") pod \"console-859655bd7d-v8qvf\" (UID: \"18f2ba17-1349-4c25-9585-d1f119a6988b\") " pod="openshift-console/console-859655bd7d-v8qvf"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.534765 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/18f2ba17-1349-4c25-9585-d1f119a6988b-console-serving-cert\") pod \"console-859655bd7d-v8qvf\" (UID: \"18f2ba17-1349-4c25-9585-d1f119a6988b\") " pod="openshift-console/console-859655bd7d-v8qvf"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.534825 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/18f2ba17-1349-4c25-9585-d1f119a6988b-service-ca\") pod \"console-859655bd7d-v8qvf\" (UID: \"18f2ba17-1349-4c25-9585-d1f119a6988b\") " pod="openshift-console/console-859655bd7d-v8qvf"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.534880 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/18f2ba17-1349-4c25-9585-d1f119a6988b-oauth-serving-cert\") pod \"console-859655bd7d-v8qvf\" (UID: \"18f2ba17-1349-4c25-9585-d1f119a6988b\") " pod="openshift-console/console-859655bd7d-v8qvf"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.534908 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jpx92\" (UniqueName: \"kubernetes.io/projected/18f2ba17-1349-4c25-9585-d1f119a6988b-kube-api-access-jpx92\") pod \"console-859655bd7d-v8qvf\" (UID: \"18f2ba17-1349-4c25-9585-d1f119a6988b\") " pod="openshift-console/console-859655bd7d-v8qvf"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.534936 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/18f2ba17-1349-4c25-9585-d1f119a6988b-trusted-ca-bundle\") pod \"console-859655bd7d-v8qvf\" (UID: \"18f2ba17-1349-4c25-9585-d1f119a6988b\") " pod="openshift-console/console-859655bd7d-v8qvf"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.535402 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/18f2ba17-1349-4c25-9585-d1f119a6988b-console-config\") pod \"console-859655bd7d-v8qvf\" (UID: \"18f2ba17-1349-4c25-9585-d1f119a6988b\") " pod="openshift-console/console-859655bd7d-v8qvf"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.569710 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"]
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.573648 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.576464 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-rgbvj"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.576824 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.576939 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.576834 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.577316 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.581311 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.583747 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.640251 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/18f2ba17-1349-4c25-9585-d1f119a6988b-service-ca\") pod \"console-859655bd7d-v8qvf\" (UID: \"18f2ba17-1349-4c25-9585-d1f119a6988b\") " pod="openshift-console/console-859655bd7d-v8qvf"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.640298 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/18f2ba17-1349-4c25-9585-d1f119a6988b-oauth-serving-cert\") pod \"console-859655bd7d-v8qvf\" (UID: \"18f2ba17-1349-4c25-9585-d1f119a6988b\") " pod="openshift-console/console-859655bd7d-v8qvf"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.640335 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jpx92\" (UniqueName: \"kubernetes.io/projected/18f2ba17-1349-4c25-9585-d1f119a6988b-kube-api-access-jpx92\") pod \"console-859655bd7d-v8qvf\" (UID: \"18f2ba17-1349-4c25-9585-d1f119a6988b\") " pod="openshift-console/console-859655bd7d-v8qvf"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.640361 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/18f2ba17-1349-4c25-9585-d1f119a6988b-trusted-ca-bundle\") pod \"console-859655bd7d-v8qvf\" (UID: \"18f2ba17-1349-4c25-9585-d1f119a6988b\") " pod="openshift-console/console-859655bd7d-v8qvf"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.640438 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/18f2ba17-1349-4c25-9585-d1f119a6988b-console-config\") pod \"console-859655bd7d-v8qvf\" (UID: \"18f2ba17-1349-4c25-9585-d1f119a6988b\") " pod="openshift-console/console-859655bd7d-v8qvf"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.640517 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/18f2ba17-1349-4c25-9585-d1f119a6988b-console-oauth-config\") pod \"console-859655bd7d-v8qvf\" (UID: \"18f2ba17-1349-4c25-9585-d1f119a6988b\") " pod="openshift-console/console-859655bd7d-v8qvf"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.640540 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/18f2ba17-1349-4c25-9585-d1f119a6988b-console-serving-cert\") pod \"console-859655bd7d-v8qvf\" (UID: \"18f2ba17-1349-4c25-9585-d1f119a6988b\") " pod="openshift-console/console-859655bd7d-v8qvf"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.642696 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/18f2ba17-1349-4c25-9585-d1f119a6988b-oauth-serving-cert\") pod \"console-859655bd7d-v8qvf\" (UID: \"18f2ba17-1349-4c25-9585-d1f119a6988b\") " pod="openshift-console/console-859655bd7d-v8qvf"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.643018 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/18f2ba17-1349-4c25-9585-d1f119a6988b-service-ca\") pod \"console-859655bd7d-v8qvf\" (UID: \"18f2ba17-1349-4c25-9585-d1f119a6988b\") " pod="openshift-console/console-859655bd7d-v8qvf"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.643687 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/18f2ba17-1349-4c25-9585-d1f119a6988b-console-config\") pod \"console-859655bd7d-v8qvf\" (UID: \"18f2ba17-1349-4c25-9585-d1f119a6988b\") " pod="openshift-console/console-859655bd7d-v8qvf"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.644088 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/18f2ba17-1349-4c25-9585-d1f119a6988b-trusted-ca-bundle\") pod \"console-859655bd7d-v8qvf\" (UID: \"18f2ba17-1349-4c25-9585-d1f119a6988b\") " pod="openshift-console/console-859655bd7d-v8qvf"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.657871 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/18f2ba17-1349-4c25-9585-d1f119a6988b-console-serving-cert\") pod \"console-859655bd7d-v8qvf\" (UID: \"18f2ba17-1349-4c25-9585-d1f119a6988b\") " pod="openshift-console/console-859655bd7d-v8qvf"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.660334 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jpx92\" (UniqueName: \"kubernetes.io/projected/18f2ba17-1349-4c25-9585-d1f119a6988b-kube-api-access-jpx92\") pod \"console-859655bd7d-v8qvf\" (UID: \"18f2ba17-1349-4c25-9585-d1f119a6988b\") " pod="openshift-console/console-859655bd7d-v8qvf"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.667470 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/18f2ba17-1349-4c25-9585-d1f119a6988b-console-oauth-config\") pod \"console-859655bd7d-v8qvf\" (UID: \"18f2ba17-1349-4c25-9585-d1f119a6988b\") " pod="openshift-console/console-859655bd7d-v8qvf"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.742166 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.742232 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7-config\") pod \"prometheus-metric-storage-0\" (UID: \"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.742322 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-77d4796a-3cc8-4516-b1f4-89b6f6883977\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-77d4796a-3cc8-4516-b1f4-89b6f6883977\") pod \"prometheus-metric-storage-0\" (UID: \"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.742376 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.742410 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.742459 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cdfffb8b-45ab-4bd4-a29a-b6ad363d9d59-serving-cert\") pod \"observability-ui-dashboards-7d5fb4cbfb-wrf89\" (UID: \"cdfffb8b-45ab-4bd4-a29a-b6ad363d9d59\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-wrf89"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.742488 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vdhdk\" (UniqueName: \"kubernetes.io/projected/3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7-kube-api-access-vdhdk\") pod \"prometheus-metric-storage-0\" (UID: \"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.742537 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.742563 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.750491 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cdfffb8b-45ab-4bd4-a29a-b6ad363d9d59-serving-cert\") pod \"observability-ui-dashboards-7d5fb4cbfb-wrf89\" (UID: \"cdfffb8b-45ab-4bd4-a29a-b6ad363d9d59\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-wrf89"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.773862 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-859655bd7d-v8qvf"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.845081 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vdhdk\" (UniqueName: \"kubernetes.io/projected/3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7-kube-api-access-vdhdk\") pod \"prometheus-metric-storage-0\" (UID: \"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.845148 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.845175 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.845300 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.845344 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7-config\") pod \"prometheus-metric-storage-0\" (UID: \"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.845379 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-77d4796a-3cc8-4516-b1f4-89b6f6883977\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-77d4796a-3cc8-4516-b1f4-89b6f6883977\") pod \"prometheus-metric-storage-0\" (UID: \"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.845945 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.845999 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.846206 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.850442 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.851185 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7-config\") pod \"prometheus-metric-storage-0\" (UID: \"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.859990 4769 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.860041 4769 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-77d4796a-3cc8-4516-b1f4-89b6f6883977\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-77d4796a-3cc8-4516-b1f4-89b6f6883977\") pod \"prometheus-metric-storage-0\" (UID: \"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/e301c5bbf2eab765e231bfc88a124cfef4fd657b7d84a5151dae63c839ee7d53/globalmount\"" pod="openstack/prometheus-metric-storage-0"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.866543 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.867162 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.878797 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.883219 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vdhdk\" (UniqueName: \"kubernetes.io/projected/3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7-kube-api-access-vdhdk\") pod \"prometheus-metric-storage-0\" (UID: \"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.913595 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-77d4796a-3cc8-4516-b1f4-89b6f6883977\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-77d4796a-3cc8-4516-b1f4-89b6f6883977\") pod \"prometheus-metric-storage-0\" (UID: \"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7\") " pod="openstack/prometheus-metric-storage-0"
Nov 25 10:04:31 crc kubenswrapper[4769]: I1125 10:04:31.968345 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-wrf89"
Nov 25 10:04:32 crc kubenswrapper[4769]: I1125 10:04:32.216414 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.461373 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"]
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.469941 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0"
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.473556 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-fjp2f"
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.473779 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts"
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.474238 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs"
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.474435 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics"
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.477010 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config"
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.499272 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"]
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.585383 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/949195eb-241c-44a0-a5ba-21fadd596967-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"949195eb-241c-44a0-a5ba-21fadd596967\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.585507 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/949195eb-241c-44a0-a5ba-21fadd596967-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"949195eb-241c-44a0-a5ba-21fadd596967\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.585555 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/949195eb-241c-44a0-a5ba-21fadd596967-config\") pod \"ovsdbserver-sb-0\" (UID: \"949195eb-241c-44a0-a5ba-21fadd596967\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.585633 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x45tg\" (UniqueName: \"kubernetes.io/projected/949195eb-241c-44a0-a5ba-21fadd596967-kube-api-access-x45tg\") pod \"ovsdbserver-sb-0\" (UID: \"949195eb-241c-44a0-a5ba-21fadd596967\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.585669 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-sb-0\" (UID: \"949195eb-241c-44a0-a5ba-21fadd596967\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.585712 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/949195eb-241c-44a0-a5ba-21fadd596967-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"949195eb-241c-44a0-a5ba-21fadd596967\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.585732 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/949195eb-241c-44a0-a5ba-21fadd596967-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"949195eb-241c-44a0-a5ba-21fadd596967\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.585773 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/949195eb-241c-44a0-a5ba-21fadd596967-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"949195eb-241c-44a0-a5ba-21fadd596967\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.688662 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/949195eb-241c-44a0-a5ba-21fadd596967-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"949195eb-241c-44a0-a5ba-21fadd596967\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.688761 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/949195eb-241c-44a0-a5ba-21fadd596967-config\") pod \"ovsdbserver-sb-0\" (UID: \"949195eb-241c-44a0-a5ba-21fadd596967\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.688823 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x45tg\" (UniqueName: \"kubernetes.io/projected/949195eb-241c-44a0-a5ba-21fadd596967-kube-api-access-x45tg\") pod \"ovsdbserver-sb-0\" (UID: \"949195eb-241c-44a0-a5ba-21fadd596967\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.688855 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-sb-0\" (UID: \"949195eb-241c-44a0-a5ba-21fadd596967\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.688890 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/949195eb-241c-44a0-a5ba-21fadd596967-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"949195eb-241c-44a0-a5ba-21fadd596967\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.688910 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/949195eb-241c-44a0-a5ba-21fadd596967-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"949195eb-241c-44a0-a5ba-21fadd596967\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.688950 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/949195eb-241c-44a0-a5ba-21fadd596967-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"949195eb-241c-44a0-a5ba-21fadd596967\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.689010 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/949195eb-241c-44a0-a5ba-21fadd596967-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"949195eb-241c-44a0-a5ba-21fadd596967\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.689488 4769 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-sb-0\" (UID: \"949195eb-241c-44a0-a5ba-21fadd596967\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/ovsdbserver-sb-0"
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.689915 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/949195eb-241c-44a0-a5ba-21fadd596967-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"949195eb-241c-44a0-a5ba-21fadd596967\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.690330 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/949195eb-241c-44a0-a5ba-21fadd596967-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"949195eb-241c-44a0-a5ba-21fadd596967\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.690816 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/949195eb-241c-44a0-a5ba-21fadd596967-config\") pod \"ovsdbserver-sb-0\" (UID: \"949195eb-241c-44a0-a5ba-21fadd596967\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.700214 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/949195eb-241c-44a0-a5ba-21fadd596967-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"949195eb-241c-44a0-a5ba-21fadd596967\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.700918 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/949195eb-241c-44a0-a5ba-21fadd596967-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"949195eb-241c-44a0-a5ba-21fadd596967\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.720565 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/949195eb-241c-44a0-a5ba-21fadd596967-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"949195eb-241c-44a0-a5ba-21fadd596967\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.721766 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x45tg\" (UniqueName: \"kubernetes.io/projected/949195eb-241c-44a0-a5ba-21fadd596967-kube-api-access-x45tg\") pod \"ovsdbserver-sb-0\" (UID: \"949195eb-241c-44a0-a5ba-21fadd596967\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.738170 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-tnz6t"]
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.740016 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-tnz6t"
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.752936 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-tzvw8"
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.754099 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs"
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.754418 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts"
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.770730 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-tnz6t"]
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.775843 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-sb-0\" (UID: \"949195eb-241c-44a0-a5ba-21fadd596967\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.792267 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/a94c59b5-e672-4d10-a090-89fa82afc1f3-var-run-ovn\") pod \"ovn-controller-tnz6t\" (UID: \"a94c59b5-e672-4d10-a090-89fa82afc1f3\") " pod="openstack/ovn-controller-tnz6t"
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.792353 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h72cm\" (UniqueName: \"kubernetes.io/projected/a94c59b5-e672-4d10-a090-89fa82afc1f3-kube-api-access-h72cm\") pod \"ovn-controller-tnz6t\" (UID: \"a94c59b5-e672-4d10-a090-89fa82afc1f3\") " pod="openstack/ovn-controller-tnz6t"
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.792398 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a94c59b5-e672-4d10-a090-89fa82afc1f3-scripts\") pod \"ovn-controller-tnz6t\" (UID: \"a94c59b5-e672-4d10-a090-89fa82afc1f3\") " pod="openstack/ovn-controller-tnz6t"
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.792460 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/a94c59b5-e672-4d10-a090-89fa82afc1f3-ovn-controller-tls-certs\") pod \"ovn-controller-tnz6t\" (UID: \"a94c59b5-e672-4d10-a090-89fa82afc1f3\") " pod="openstack/ovn-controller-tnz6t"
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.792510 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a94c59b5-e672-4d10-a090-89fa82afc1f3-combined-ca-bundle\") pod \"ovn-controller-tnz6t\" (UID: \"a94c59b5-e672-4d10-a090-89fa82afc1f3\") " pod="openstack/ovn-controller-tnz6t"
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.792544 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/a94c59b5-e672-4d10-a090-89fa82afc1f3-var-run\") pod \"ovn-controller-tnz6t\" (UID: \"a94c59b5-e672-4d10-a090-89fa82afc1f3\") " pod="openstack/ovn-controller-tnz6t"
Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.792581 4769 reconciler_common.go:245]
"operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/a94c59b5-e672-4d10-a090-89fa82afc1f3-var-log-ovn\") pod \"ovn-controller-tnz6t\" (UID: \"a94c59b5-e672-4d10-a090-89fa82afc1f3\") " pod="openstack/ovn-controller-tnz6t" Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.803147 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.818927 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-2qznl"] Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.821743 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-2qznl" Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.837085 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-2qznl"] Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.899155 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2d073f77-c21a-401e-8fd2-e6f08d2bcf17-scripts\") pod \"ovn-controller-ovs-2qznl\" (UID: \"2d073f77-c21a-401e-8fd2-e6f08d2bcf17\") " pod="openstack/ovn-controller-ovs-2qznl" Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.902743 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/2d073f77-c21a-401e-8fd2-e6f08d2bcf17-var-log\") pod \"ovn-controller-ovs-2qznl\" (UID: \"2d073f77-c21a-401e-8fd2-e6f08d2bcf17\") " pod="openstack/ovn-controller-ovs-2qznl" Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.903461 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/a94c59b5-e672-4d10-a090-89fa82afc1f3-var-run-ovn\") pod \"ovn-controller-tnz6t\" (UID: \"a94c59b5-e672-4d10-a090-89fa82afc1f3\") " pod="openstack/ovn-controller-tnz6t" Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.903819 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/2d073f77-c21a-401e-8fd2-e6f08d2bcf17-var-run\") pod \"ovn-controller-ovs-2qznl\" (UID: \"2d073f77-c21a-401e-8fd2-e6f08d2bcf17\") " pod="openstack/ovn-controller-ovs-2qznl" Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.904075 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/a94c59b5-e672-4d10-a090-89fa82afc1f3-var-run-ovn\") pod \"ovn-controller-tnz6t\" (UID: \"a94c59b5-e672-4d10-a090-89fa82afc1f3\") " pod="openstack/ovn-controller-tnz6t" Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.904048 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h72cm\" (UniqueName: \"kubernetes.io/projected/a94c59b5-e672-4d10-a090-89fa82afc1f3-kube-api-access-h72cm\") pod \"ovn-controller-tnz6t\" (UID: \"a94c59b5-e672-4d10-a090-89fa82afc1f3\") " pod="openstack/ovn-controller-tnz6t" Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.904353 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a94c59b5-e672-4d10-a090-89fa82afc1f3-scripts\") pod \"ovn-controller-tnz6t\" (UID: \"a94c59b5-e672-4d10-a090-89fa82afc1f3\") " 
pod="openstack/ovn-controller-tnz6t" Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.904555 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8vbf2\" (UniqueName: \"kubernetes.io/projected/2d073f77-c21a-401e-8fd2-e6f08d2bcf17-kube-api-access-8vbf2\") pod \"ovn-controller-ovs-2qznl\" (UID: \"2d073f77-c21a-401e-8fd2-e6f08d2bcf17\") " pod="openstack/ovn-controller-ovs-2qznl" Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.905363 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/a94c59b5-e672-4d10-a090-89fa82afc1f3-ovn-controller-tls-certs\") pod \"ovn-controller-tnz6t\" (UID: \"a94c59b5-e672-4d10-a090-89fa82afc1f3\") " pod="openstack/ovn-controller-tnz6t" Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.905552 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/2d073f77-c21a-401e-8fd2-e6f08d2bcf17-var-lib\") pod \"ovn-controller-ovs-2qznl\" (UID: \"2d073f77-c21a-401e-8fd2-e6f08d2bcf17\") " pod="openstack/ovn-controller-ovs-2qznl" Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.905751 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a94c59b5-e672-4d10-a090-89fa82afc1f3-combined-ca-bundle\") pod \"ovn-controller-tnz6t\" (UID: \"a94c59b5-e672-4d10-a090-89fa82afc1f3\") " pod="openstack/ovn-controller-tnz6t" Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.905930 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/a94c59b5-e672-4d10-a090-89fa82afc1f3-var-run\") pod \"ovn-controller-tnz6t\" (UID: \"a94c59b5-e672-4d10-a090-89fa82afc1f3\") " pod="openstack/ovn-controller-tnz6t" Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.906314 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/2d073f77-c21a-401e-8fd2-e6f08d2bcf17-etc-ovs\") pod \"ovn-controller-ovs-2qznl\" (UID: \"2d073f77-c21a-401e-8fd2-e6f08d2bcf17\") " pod="openstack/ovn-controller-ovs-2qznl" Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.906473 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/a94c59b5-e672-4d10-a090-89fa82afc1f3-var-log-ovn\") pod \"ovn-controller-tnz6t\" (UID: \"a94c59b5-e672-4d10-a090-89fa82afc1f3\") " pod="openstack/ovn-controller-tnz6t" Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.908014 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a94c59b5-e672-4d10-a090-89fa82afc1f3-scripts\") pod \"ovn-controller-tnz6t\" (UID: \"a94c59b5-e672-4d10-a090-89fa82afc1f3\") " pod="openstack/ovn-controller-tnz6t" Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.908283 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/a94c59b5-e672-4d10-a090-89fa82afc1f3-var-log-ovn\") pod \"ovn-controller-tnz6t\" (UID: \"a94c59b5-e672-4d10-a090-89fa82afc1f3\") " pod="openstack/ovn-controller-tnz6t" Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.912726 4769 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a94c59b5-e672-4d10-a090-89fa82afc1f3-combined-ca-bundle\") pod \"ovn-controller-tnz6t\" (UID: \"a94c59b5-e672-4d10-a090-89fa82afc1f3\") " pod="openstack/ovn-controller-tnz6t" Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.915259 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/a94c59b5-e672-4d10-a090-89fa82afc1f3-var-run\") pod \"ovn-controller-tnz6t\" (UID: \"a94c59b5-e672-4d10-a090-89fa82afc1f3\") " pod="openstack/ovn-controller-tnz6t" Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.919750 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/a94c59b5-e672-4d10-a090-89fa82afc1f3-ovn-controller-tls-certs\") pod \"ovn-controller-tnz6t\" (UID: \"a94c59b5-e672-4d10-a090-89fa82afc1f3\") " pod="openstack/ovn-controller-tnz6t" Nov 25 10:04:33 crc kubenswrapper[4769]: I1125 10:04:33.938317 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h72cm\" (UniqueName: \"kubernetes.io/projected/a94c59b5-e672-4d10-a090-89fa82afc1f3-kube-api-access-h72cm\") pod \"ovn-controller-tnz6t\" (UID: \"a94c59b5-e672-4d10-a090-89fa82afc1f3\") " pod="openstack/ovn-controller-tnz6t" Nov 25 10:04:34 crc kubenswrapper[4769]: I1125 10:04:34.013568 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2d073f77-c21a-401e-8fd2-e6f08d2bcf17-scripts\") pod \"ovn-controller-ovs-2qznl\" (UID: \"2d073f77-c21a-401e-8fd2-e6f08d2bcf17\") " pod="openstack/ovn-controller-ovs-2qznl" Nov 25 10:04:34 crc kubenswrapper[4769]: I1125 10:04:34.013671 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/2d073f77-c21a-401e-8fd2-e6f08d2bcf17-var-log\") pod \"ovn-controller-ovs-2qznl\" (UID: \"2d073f77-c21a-401e-8fd2-e6f08d2bcf17\") " pod="openstack/ovn-controller-ovs-2qznl" Nov 25 10:04:34 crc kubenswrapper[4769]: I1125 10:04:34.013702 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/2d073f77-c21a-401e-8fd2-e6f08d2bcf17-var-run\") pod \"ovn-controller-ovs-2qznl\" (UID: \"2d073f77-c21a-401e-8fd2-e6f08d2bcf17\") " pod="openstack/ovn-controller-ovs-2qznl" Nov 25 10:04:34 crc kubenswrapper[4769]: I1125 10:04:34.013773 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8vbf2\" (UniqueName: \"kubernetes.io/projected/2d073f77-c21a-401e-8fd2-e6f08d2bcf17-kube-api-access-8vbf2\") pod \"ovn-controller-ovs-2qznl\" (UID: \"2d073f77-c21a-401e-8fd2-e6f08d2bcf17\") " pod="openstack/ovn-controller-ovs-2qznl" Nov 25 10:04:34 crc kubenswrapper[4769]: I1125 10:04:34.013830 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/2d073f77-c21a-401e-8fd2-e6f08d2bcf17-var-lib\") pod \"ovn-controller-ovs-2qznl\" (UID: \"2d073f77-c21a-401e-8fd2-e6f08d2bcf17\") " pod="openstack/ovn-controller-ovs-2qznl" Nov 25 10:04:34 crc kubenswrapper[4769]: I1125 10:04:34.013896 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/2d073f77-c21a-401e-8fd2-e6f08d2bcf17-etc-ovs\") pod \"ovn-controller-ovs-2qznl\" (UID: \"2d073f77-c21a-401e-8fd2-e6f08d2bcf17\") " 
pod="openstack/ovn-controller-ovs-2qznl" Nov 25 10:04:34 crc kubenswrapper[4769]: I1125 10:04:34.014279 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/2d073f77-c21a-401e-8fd2-e6f08d2bcf17-etc-ovs\") pod \"ovn-controller-ovs-2qznl\" (UID: \"2d073f77-c21a-401e-8fd2-e6f08d2bcf17\") " pod="openstack/ovn-controller-ovs-2qznl" Nov 25 10:04:34 crc kubenswrapper[4769]: I1125 10:04:34.016473 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/2d073f77-c21a-401e-8fd2-e6f08d2bcf17-var-log\") pod \"ovn-controller-ovs-2qznl\" (UID: \"2d073f77-c21a-401e-8fd2-e6f08d2bcf17\") " pod="openstack/ovn-controller-ovs-2qznl" Nov 25 10:04:34 crc kubenswrapper[4769]: I1125 10:04:34.016544 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/2d073f77-c21a-401e-8fd2-e6f08d2bcf17-var-run\") pod \"ovn-controller-ovs-2qznl\" (UID: \"2d073f77-c21a-401e-8fd2-e6f08d2bcf17\") " pod="openstack/ovn-controller-ovs-2qznl" Nov 25 10:04:34 crc kubenswrapper[4769]: I1125 10:04:34.016928 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2d073f77-c21a-401e-8fd2-e6f08d2bcf17-scripts\") pod \"ovn-controller-ovs-2qznl\" (UID: \"2d073f77-c21a-401e-8fd2-e6f08d2bcf17\") " pod="openstack/ovn-controller-ovs-2qznl" Nov 25 10:04:34 crc kubenswrapper[4769]: I1125 10:04:34.018118 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/2d073f77-c21a-401e-8fd2-e6f08d2bcf17-var-lib\") pod \"ovn-controller-ovs-2qznl\" (UID: \"2d073f77-c21a-401e-8fd2-e6f08d2bcf17\") " pod="openstack/ovn-controller-ovs-2qznl" Nov 25 10:04:34 crc kubenswrapper[4769]: I1125 10:04:34.035526 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8vbf2\" (UniqueName: \"kubernetes.io/projected/2d073f77-c21a-401e-8fd2-e6f08d2bcf17-kube-api-access-8vbf2\") pod \"ovn-controller-ovs-2qznl\" (UID: \"2d073f77-c21a-401e-8fd2-e6f08d2bcf17\") " pod="openstack/ovn-controller-ovs-2qznl" Nov 25 10:04:34 crc kubenswrapper[4769]: I1125 10:04:34.186383 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-2qznl" Nov 25 10:04:34 crc kubenswrapper[4769]: I1125 10:04:34.187302 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-tnz6t" Nov 25 10:04:37 crc kubenswrapper[4769]: I1125 10:04:37.619955 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 25 10:04:37 crc kubenswrapper[4769]: I1125 10:04:37.623666 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 25 10:04:37 crc kubenswrapper[4769]: I1125 10:04:37.629404 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 25 10:04:37 crc kubenswrapper[4769]: I1125 10:04:37.629514 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Nov 25 10:04:37 crc kubenswrapper[4769]: I1125 10:04:37.629900 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 25 10:04:37 crc kubenswrapper[4769]: I1125 10:04:37.629906 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-khqxn" Nov 25 10:04:37 crc kubenswrapper[4769]: I1125 10:04:37.638650 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 25 10:04:37 crc kubenswrapper[4769]: I1125 10:04:37.724446 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/80bf45ba-a6bb-4e24-86c8-1234bb9f61f0-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"80bf45ba-a6bb-4e24-86c8-1234bb9f61f0\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:04:37 crc kubenswrapper[4769]: I1125 10:04:37.724495 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/80bf45ba-a6bb-4e24-86c8-1234bb9f61f0-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"80bf45ba-a6bb-4e24-86c8-1234bb9f61f0\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:04:37 crc kubenswrapper[4769]: I1125 10:04:37.724544 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ghtsf\" (UniqueName: \"kubernetes.io/projected/80bf45ba-a6bb-4e24-86c8-1234bb9f61f0-kube-api-access-ghtsf\") pod \"ovsdbserver-nb-0\" (UID: \"80bf45ba-a6bb-4e24-86c8-1234bb9f61f0\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:04:37 crc kubenswrapper[4769]: I1125 10:04:37.724570 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/80bf45ba-a6bb-4e24-86c8-1234bb9f61f0-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"80bf45ba-a6bb-4e24-86c8-1234bb9f61f0\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:04:37 crc kubenswrapper[4769]: I1125 10:04:37.724918 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/80bf45ba-a6bb-4e24-86c8-1234bb9f61f0-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"80bf45ba-a6bb-4e24-86c8-1234bb9f61f0\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:04:37 crc kubenswrapper[4769]: I1125 10:04:37.725007 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/80bf45ba-a6bb-4e24-86c8-1234bb9f61f0-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"80bf45ba-a6bb-4e24-86c8-1234bb9f61f0\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:04:37 crc kubenswrapper[4769]: I1125 10:04:37.725122 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"ovsdbserver-nb-0\" (UID: 
\"80bf45ba-a6bb-4e24-86c8-1234bb9f61f0\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:04:37 crc kubenswrapper[4769]: I1125 10:04:37.725492 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/80bf45ba-a6bb-4e24-86c8-1234bb9f61f0-config\") pod \"ovsdbserver-nb-0\" (UID: \"80bf45ba-a6bb-4e24-86c8-1234bb9f61f0\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:04:37 crc kubenswrapper[4769]: I1125 10:04:37.829002 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/80bf45ba-a6bb-4e24-86c8-1234bb9f61f0-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"80bf45ba-a6bb-4e24-86c8-1234bb9f61f0\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:04:37 crc kubenswrapper[4769]: I1125 10:04:37.829093 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/80bf45ba-a6bb-4e24-86c8-1234bb9f61f0-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"80bf45ba-a6bb-4e24-86c8-1234bb9f61f0\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:04:37 crc kubenswrapper[4769]: I1125 10:04:37.829144 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ghtsf\" (UniqueName: \"kubernetes.io/projected/80bf45ba-a6bb-4e24-86c8-1234bb9f61f0-kube-api-access-ghtsf\") pod \"ovsdbserver-nb-0\" (UID: \"80bf45ba-a6bb-4e24-86c8-1234bb9f61f0\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:04:37 crc kubenswrapper[4769]: I1125 10:04:37.829176 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/80bf45ba-a6bb-4e24-86c8-1234bb9f61f0-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"80bf45ba-a6bb-4e24-86c8-1234bb9f61f0\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:04:37 crc kubenswrapper[4769]: I1125 10:04:37.829270 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/80bf45ba-a6bb-4e24-86c8-1234bb9f61f0-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"80bf45ba-a6bb-4e24-86c8-1234bb9f61f0\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:04:37 crc kubenswrapper[4769]: I1125 10:04:37.829303 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/80bf45ba-a6bb-4e24-86c8-1234bb9f61f0-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"80bf45ba-a6bb-4e24-86c8-1234bb9f61f0\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:04:37 crc kubenswrapper[4769]: I1125 10:04:37.829364 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"ovsdbserver-nb-0\" (UID: \"80bf45ba-a6bb-4e24-86c8-1234bb9f61f0\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:04:37 crc kubenswrapper[4769]: I1125 10:04:37.830077 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/80bf45ba-a6bb-4e24-86c8-1234bb9f61f0-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"80bf45ba-a6bb-4e24-86c8-1234bb9f61f0\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:04:37 crc kubenswrapper[4769]: I1125 10:04:37.830136 4769 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage06-crc\") pod \"ovsdbserver-nb-0\" (UID: \"80bf45ba-a6bb-4e24-86c8-1234bb9f61f0\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/ovsdbserver-nb-0" Nov 25 10:04:37 crc kubenswrapper[4769]: I1125 10:04:37.830500 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/80bf45ba-a6bb-4e24-86c8-1234bb9f61f0-config\") pod \"ovsdbserver-nb-0\" (UID: \"80bf45ba-a6bb-4e24-86c8-1234bb9f61f0\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:04:37 crc kubenswrapper[4769]: I1125 10:04:37.830625 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/80bf45ba-a6bb-4e24-86c8-1234bb9f61f0-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"80bf45ba-a6bb-4e24-86c8-1234bb9f61f0\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:04:37 crc kubenswrapper[4769]: I1125 10:04:37.831614 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/80bf45ba-a6bb-4e24-86c8-1234bb9f61f0-config\") pod \"ovsdbserver-nb-0\" (UID: \"80bf45ba-a6bb-4e24-86c8-1234bb9f61f0\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:04:37 crc kubenswrapper[4769]: I1125 10:04:37.837211 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/80bf45ba-a6bb-4e24-86c8-1234bb9f61f0-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"80bf45ba-a6bb-4e24-86c8-1234bb9f61f0\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:04:37 crc kubenswrapper[4769]: I1125 10:04:37.837699 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/80bf45ba-a6bb-4e24-86c8-1234bb9f61f0-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"80bf45ba-a6bb-4e24-86c8-1234bb9f61f0\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:04:37 crc kubenswrapper[4769]: I1125 10:04:37.837737 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/80bf45ba-a6bb-4e24-86c8-1234bb9f61f0-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"80bf45ba-a6bb-4e24-86c8-1234bb9f61f0\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:04:37 crc kubenswrapper[4769]: I1125 10:04:37.850575 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ghtsf\" (UniqueName: \"kubernetes.io/projected/80bf45ba-a6bb-4e24-86c8-1234bb9f61f0-kube-api-access-ghtsf\") pod \"ovsdbserver-nb-0\" (UID: \"80bf45ba-a6bb-4e24-86c8-1234bb9f61f0\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:04:37 crc kubenswrapper[4769]: I1125 10:04:37.867359 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"ovsdbserver-nb-0\" (UID: \"80bf45ba-a6bb-4e24-86c8-1234bb9f61f0\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:04:37 crc kubenswrapper[4769]: I1125 10:04:37.963022 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 25 10:04:41 crc kubenswrapper[4769]: I1125 10:04:41.621712 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 25 10:04:43 crc kubenswrapper[4769]: E1125 10:04:43.972504 4769 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 25 10:04:43 crc kubenswrapper[4769]: E1125 10:04:43.973530 4769 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ssrvf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-666b6646f7-9vg8m_openstack(50d6488a-84f8-41e0-b971-49822125edf2): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 10:04:43 crc kubenswrapper[4769]: E1125 10:04:43.975292 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-666b6646f7-9vg8m" podUID="50d6488a-84f8-41e0-b971-49822125edf2" Nov 25 10:04:43 crc kubenswrapper[4769]: W1125 10:04:43.975393 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8c646d86_e48c_4d40_8370_1736c484875f.slice/crio-92386d898338e13784a313c9f3b233ae316b17a3947c84a5f36d7d078549818d 
WatchSource:0}: Error finding container 92386d898338e13784a313c9f3b233ae316b17a3947c84a5f36d7d078549818d: Status 404 returned error can't find the container with id 92386d898338e13784a313c9f3b233ae316b17a3947c84a5f36d7d078549818d Nov 25 10:04:44 crc kubenswrapper[4769]: E1125 10:04:44.007584 4769 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 25 10:04:44 crc kubenswrapper[4769]: E1125 10:04:44.007800 4769 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-995fv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-jldwl_openstack(ab96ca8c-d498-4f2f-a19c-07cbe5356c21): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 10:04:44 crc kubenswrapper[4769]: E1125 10:04:44.008957 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-jldwl" podUID="ab96ca8c-d498-4f2f-a19c-07cbe5356c21" Nov 25 10:04:44 crc kubenswrapper[4769]: I1125 10:04:44.021339 4769 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 10:04:44 crc kubenswrapper[4769]: E1125 10:04:44.048286 4769 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" 
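The repeated ErrImagePull entries in this stretch ("copying config: context canceled") all trace to the same dnsmasq init container, whose full spec kubelet dumps inline as a Go struct literal for three different dnsmasq-dns pods. For readability, a minimal sketch of that spec in Go, assuming k8s.io/api/core/v1; the field values are copied from the dumps above and below, while the package scaffolding and the variable name dnsmasqInit are reconstruction:

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
    )

    // dnsmasqInit mirrors the init container logged for the dnsmasq-dns
    // pods. The per-pod CONFIG_HASH env var, the auto-injected
    // kube-api-access service-account mount, and the SecurityContext
    // are omitted here.
    var dnsmasqInit = corev1.Container{
        Name:    "init",
        Image:   "quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified",
        Command: []string{"/bin/bash"},
        // kubelet expands $(POD_IP) from the container environment.
        Args: []string{"-c",
            "dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d" +
                " --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground" +
                " --log-debug --bind-interfaces --listen-address=$(POD_IP)" +
                " --port 5353 --log-facility=- --no-hosts --domain-needed" +
                " --no-resolv --bogus-priv --log-queries --test"},
        Env: []corev1.EnvVar{{
            // Downward-API env var resolved from the pod's status.
            Name: "POD_IP",
            ValueFrom: &corev1.EnvVarSource{
                FieldRef: &corev1.ObjectFieldSelector{
                    APIVersion: "v1",
                    FieldPath:  "status.podIP",
                },
            },
        }},
        VolumeMounts: []corev1.VolumeMount{
            {Name: "config", ReadOnly: true,
                MountPath: "/etc/dnsmasq.d/config.cfg", SubPath: "dns"},
            // This subPath mount is the one the CreateContainerError a few
            // entries below reports as "No such file or directory".
            {Name: "dns-svc", ReadOnly: true,
                MountPath: "/etc/dnsmasq.d/hosts/dns-svc", SubPath: "dns-svc"},
        },
        ImagePullPolicy: corev1.PullIfNotPresent,
    }

    func main() { fmt.Println(dnsmasqInit.Name, dnsmasqInit.Image) }

Because the container runs dnsmasq with --test, it only syntax-checks the generated configuration and exits. Note that the dnsmasq-dns-675f4bcbfc-jldwl variant dumped just above carries no dns-svc mount, which is why its spec lists only the config and service-account volume mounts.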
Nov 25 10:04:44 crc kubenswrapper[4769]: E1125 10:04:44.048562 4769 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kx55d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-5dk5k_openstack(c81cebb9-bdc4-48e9-aa6e-379e9a306c33): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 10:04:44 crc kubenswrapper[4769]: E1125 10:04:44.049805 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-5dk5k" podUID="c81cebb9-bdc4-48e9-aa6e-379e9a306c33" Nov 25 10:04:44 crc kubenswrapper[4769]: I1125 10:04:44.296160 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"8c646d86-e48c-4d40-8370-1736c484875f","Type":"ContainerStarted","Data":"92386d898338e13784a313c9f3b233ae316b17a3947c84a5f36d7d078549818d"} Nov 25 10:04:44 crc kubenswrapper[4769]: E1125 10:04:44.558473 4769 log.go:32] "CreateContainer in sandbox from runtime service failed" err=< Nov 25 10:04:44 crc kubenswrapper[4769]: rpc error: code = Unknown desc = container create failed: mount `/var/lib/kubelet/pods/50d6488a-84f8-41e0-b971-49822125edf2/volume-subpaths/dns-svc/init/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Nov 25 10:04:44 crc kubenswrapper[4769]: > 
podSandboxID="cf688d93473a051cc9829bf7a796a24d3d750124eeabb5b84898936a34a74818" Nov 25 10:04:44 crc kubenswrapper[4769]: E1125 10:04:44.559004 4769 kuberuntime_manager.go:1274] "Unhandled Error" err=< Nov 25 10:04:44 crc kubenswrapper[4769]: init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ssrvf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-666b6646f7-9vg8m_openstack(50d6488a-84f8-41e0-b971-49822125edf2): CreateContainerError: container create failed: mount `/var/lib/kubelet/pods/50d6488a-84f8-41e0-b971-49822125edf2/volume-subpaths/dns-svc/init/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Nov 25 10:04:44 crc kubenswrapper[4769]: > logger="UnhandledError" Nov 25 10:04:44 crc kubenswrapper[4769]: E1125 10:04:44.560188 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with CreateContainerError: \"container create failed: mount `/var/lib/kubelet/pods/50d6488a-84f8-41e0-b971-49822125edf2/volume-subpaths/dns-svc/init/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory\\n\"" pod="openstack/dnsmasq-dns-666b6646f7-9vg8m" podUID="50d6488a-84f8-41e0-b971-49822125edf2" Nov 25 10:04:44 crc kubenswrapper[4769]: I1125 10:04:44.991215 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 25 10:04:45 crc kubenswrapper[4769]: I1125 10:04:45.315160 4769 generic.go:334] "Generic (PLEG): container finished" podID="f1ce4931-1e74-4600-87e2-5fb7c899376a" 
containerID="d7a8919065c198dd26acf6f4cbd280241d370817f9d9052ebe1d9bde522f2663" exitCode=0 Nov 25 10:04:45 crc kubenswrapper[4769]: I1125 10:04:45.316486 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-6cxlg" event={"ID":"f1ce4931-1e74-4600-87e2-5fb7c899376a","Type":"ContainerDied","Data":"d7a8919065c198dd26acf6f4cbd280241d370817f9d9052ebe1d9bde522f2663"} Nov 25 10:04:45 crc kubenswrapper[4769]: I1125 10:04:45.318487 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"8e360825-f56f-4e69-9e17-c9e78f295267","Type":"ContainerStarted","Data":"b9296265058af415cee136198a00affadd21fab1900dc56466d9454f8bfd1c9a"} Nov 25 10:04:45 crc kubenswrapper[4769]: I1125 10:04:45.419078 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 10:04:45 crc kubenswrapper[4769]: I1125 10:04:45.432179 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 25 10:04:45 crc kubenswrapper[4769]: I1125 10:04:45.440727 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 25 10:04:45 crc kubenswrapper[4769]: I1125 10:04:45.444830 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-tnz6t"] Nov 25 10:04:45 crc kubenswrapper[4769]: I1125 10:04:45.606914 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-ui-dashboards-7d5fb4cbfb-wrf89"] Nov 25 10:04:45 crc kubenswrapper[4769]: I1125 10:04:45.625115 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-859655bd7d-v8qvf"] Nov 25 10:04:45 crc kubenswrapper[4769]: W1125 10:04:45.690453 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod52438bf8_8800_4078_bc88_63033a83dd2e.slice/crio-53f2433323d4f118c188f214b1123a29e61aee4eca07d861db2528b67dbaa0b1 WatchSource:0}: Error finding container 53f2433323d4f118c188f214b1123a29e61aee4eca07d861db2528b67dbaa0b1: Status 404 returned error can't find the container with id 53f2433323d4f118c188f214b1123a29e61aee4eca07d861db2528b67dbaa0b1 Nov 25 10:04:45 crc kubenswrapper[4769]: W1125 10:04:45.793532 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod75a23be3_741b_4e4e_a168_4cc37f54073d.slice/crio-1a9b71c7d4810835bc018756aff653f9f7d1e01184b99eb3ca28638b2a8f3c8c WatchSource:0}: Error finding container 1a9b71c7d4810835bc018756aff653f9f7d1e01184b99eb3ca28638b2a8f3c8c: Status 404 returned error can't find the container with id 1a9b71c7d4810835bc018756aff653f9f7d1e01184b99eb3ca28638b2a8f3c8c Nov 25 10:04:45 crc kubenswrapper[4769]: I1125 10:04:45.845773 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 25 10:04:46 crc kubenswrapper[4769]: W1125 10:04:46.022350 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod80bf45ba_a6bb_4e24_86c8_1234bb9f61f0.slice/crio-56d2f526f14b34aca8538c868e92484fd2892c39d6bc1a07fc82dc119c6c89e0 WatchSource:0}: Error finding container 56d2f526f14b34aca8538c868e92484fd2892c39d6bc1a07fc82dc119c6c89e0: Status 404 returned error can't find the container with id 56d2f526f14b34aca8538c868e92484fd2892c39d6bc1a07fc82dc119c6c89e0 Nov 25 10:04:46 crc kubenswrapper[4769]: I1125 10:04:46.190929 4769 util.go:48] 
"No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-5dk5k" Nov 25 10:04:46 crc kubenswrapper[4769]: I1125 10:04:46.214375 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-jldwl" Nov 25 10:04:46 crc kubenswrapper[4769]: I1125 10:04:46.343266 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-5dk5k" Nov 25 10:04:46 crc kubenswrapper[4769]: I1125 10:04:46.345882 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-jldwl" Nov 25 10:04:46 crc kubenswrapper[4769]: I1125 10:04:46.351374 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kx55d\" (UniqueName: \"kubernetes.io/projected/c81cebb9-bdc4-48e9-aa6e-379e9a306c33-kube-api-access-kx55d\") pod \"c81cebb9-bdc4-48e9-aa6e-379e9a306c33\" (UID: \"c81cebb9-bdc4-48e9-aa6e-379e9a306c33\") " Nov 25 10:04:46 crc kubenswrapper[4769]: I1125 10:04:46.351522 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c81cebb9-bdc4-48e9-aa6e-379e9a306c33-dns-svc\") pod \"c81cebb9-bdc4-48e9-aa6e-379e9a306c33\" (UID: \"c81cebb9-bdc4-48e9-aa6e-379e9a306c33\") " Nov 25 10:04:46 crc kubenswrapper[4769]: I1125 10:04:46.351642 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-995fv\" (UniqueName: \"kubernetes.io/projected/ab96ca8c-d498-4f2f-a19c-07cbe5356c21-kube-api-access-995fv\") pod \"ab96ca8c-d498-4f2f-a19c-07cbe5356c21\" (UID: \"ab96ca8c-d498-4f2f-a19c-07cbe5356c21\") " Nov 25 10:04:46 crc kubenswrapper[4769]: I1125 10:04:46.351791 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c81cebb9-bdc4-48e9-aa6e-379e9a306c33-config\") pod \"c81cebb9-bdc4-48e9-aa6e-379e9a306c33\" (UID: \"c81cebb9-bdc4-48e9-aa6e-379e9a306c33\") " Nov 25 10:04:46 crc kubenswrapper[4769]: I1125 10:04:46.351830 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab96ca8c-d498-4f2f-a19c-07cbe5356c21-config\") pod \"ab96ca8c-d498-4f2f-a19c-07cbe5356c21\" (UID: \"ab96ca8c-d498-4f2f-a19c-07cbe5356c21\") " Nov 25 10:04:46 crc kubenswrapper[4769]: I1125 10:04:46.354169 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ab96ca8c-d498-4f2f-a19c-07cbe5356c21-config" (OuterVolumeSpecName: "config") pod "ab96ca8c-d498-4f2f-a19c-07cbe5356c21" (UID: "ab96ca8c-d498-4f2f-a19c-07cbe5356c21"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:04:46 crc kubenswrapper[4769]: I1125 10:04:46.354303 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c81cebb9-bdc4-48e9-aa6e-379e9a306c33-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c81cebb9-bdc4-48e9-aa6e-379e9a306c33" (UID: "c81cebb9-bdc4-48e9-aa6e-379e9a306c33"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:04:46 crc kubenswrapper[4769]: I1125 10:04:46.355100 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c81cebb9-bdc4-48e9-aa6e-379e9a306c33-config" (OuterVolumeSpecName: "config") pod "c81cebb9-bdc4-48e9-aa6e-379e9a306c33" (UID: "c81cebb9-bdc4-48e9-aa6e-379e9a306c33"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:04:46 crc kubenswrapper[4769]: I1125 10:04:46.359807 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c81cebb9-bdc4-48e9-aa6e-379e9a306c33-kube-api-access-kx55d" (OuterVolumeSpecName: "kube-api-access-kx55d") pod "c81cebb9-bdc4-48e9-aa6e-379e9a306c33" (UID: "c81cebb9-bdc4-48e9-aa6e-379e9a306c33"). InnerVolumeSpecName "kube-api-access-kx55d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:04:46 crc kubenswrapper[4769]: I1125 10:04:46.364645 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab96ca8c-d498-4f2f-a19c-07cbe5356c21-kube-api-access-995fv" (OuterVolumeSpecName: "kube-api-access-995fv") pod "ab96ca8c-d498-4f2f-a19c-07cbe5356c21" (UID: "ab96ca8c-d498-4f2f-a19c-07cbe5356c21"). InnerVolumeSpecName "kube-api-access-995fv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:04:46 crc kubenswrapper[4769]: I1125 10:04:46.367567 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-tnz6t" event={"ID":"a94c59b5-e672-4d10-a090-89fa82afc1f3","Type":"ContainerStarted","Data":"4b2fe0e3e6c1c05e5ec83ce95a0a2f44cbfec64c2cc9dfdd39d587d0621a4e02"} Nov 25 10:04:46 crc kubenswrapper[4769]: I1125 10:04:46.367626 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7","Type":"ContainerStarted","Data":"b47fda3a9ca3e2463e0f5133548b16a13692519f2cde980e87301ac4180a5508"} Nov 25 10:04:46 crc kubenswrapper[4769]: I1125 10:04:46.367646 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-859655bd7d-v8qvf" event={"ID":"18f2ba17-1349-4c25-9585-d1f119a6988b","Type":"ContainerStarted","Data":"91f9752620a19e9184cfc9864b0991170aebbb9a02d31cddf96d9ddf815941ca"} Nov 25 10:04:46 crc kubenswrapper[4769]: I1125 10:04:46.367663 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"80bf45ba-a6bb-4e24-86c8-1234bb9f61f0","Type":"ContainerStarted","Data":"56d2f526f14b34aca8538c868e92484fd2892c39d6bc1a07fc82dc119c6c89e0"} Nov 25 10:04:46 crc kubenswrapper[4769]: I1125 10:04:46.367677 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"75a23be3-741b-4e4e-a168-4cc37f54073d","Type":"ContainerStarted","Data":"1a9b71c7d4810835bc018756aff653f9f7d1e01184b99eb3ca28638b2a8f3c8c"} Nov 25 10:04:46 crc kubenswrapper[4769]: I1125 10:04:46.367691 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"4857aaf8-4133-4c20-bc8c-d4d195091176","Type":"ContainerStarted","Data":"2a413543e21b9469c261c3a61cde7924060f4282a41e7a3c3da17759fd57caf4"} Nov 25 10:04:46 crc kubenswrapper[4769]: I1125 10:04:46.367707 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" 
event={"ID":"981369ae-93f2-4c25-bdea-d3d89686b0d5","Type":"ContainerStarted","Data":"e81a4a46bd061521e1409da211c03d0f8532e14a5cf15cf52d40cc95b4cda39d"} Nov 25 10:04:46 crc kubenswrapper[4769]: I1125 10:04:46.367721 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-5dk5k" event={"ID":"c81cebb9-bdc4-48e9-aa6e-379e9a306c33","Type":"ContainerDied","Data":"a4435bfe219190e8eba28d9d9943de182b114744ca8bb5efc697d807e13b9c5e"} Nov 25 10:04:46 crc kubenswrapper[4769]: I1125 10:04:46.367740 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-jldwl" event={"ID":"ab96ca8c-d498-4f2f-a19c-07cbe5356c21","Type":"ContainerDied","Data":"b48cea0c55c337a6c81d31173cec0e314cffda1c155dead024042c52ec31825d"} Nov 25 10:04:46 crc kubenswrapper[4769]: I1125 10:04:46.367754 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"52438bf8-8800-4078-bc88-63033a83dd2e","Type":"ContainerStarted","Data":"53f2433323d4f118c188f214b1123a29e61aee4eca07d861db2528b67dbaa0b1"} Nov 25 10:04:46 crc kubenswrapper[4769]: I1125 10:04:46.367768 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-wrf89" event={"ID":"cdfffb8b-45ab-4bd4-a29a-b6ad363d9d59","Type":"ContainerStarted","Data":"1be03849d9b846b9a20a941a4b095019c95c71de3a26d2a74ca6533e8181d1e3"} Nov 25 10:04:46 crc kubenswrapper[4769]: I1125 10:04:46.454877 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c81cebb9-bdc4-48e9-aa6e-379e9a306c33-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:04:46 crc kubenswrapper[4769]: I1125 10:04:46.456150 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab96ca8c-d498-4f2f-a19c-07cbe5356c21-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:04:46 crc kubenswrapper[4769]: I1125 10:04:46.456403 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kx55d\" (UniqueName: \"kubernetes.io/projected/c81cebb9-bdc4-48e9-aa6e-379e9a306c33-kube-api-access-kx55d\") on node \"crc\" DevicePath \"\"" Nov 25 10:04:46 crc kubenswrapper[4769]: I1125 10:04:46.456434 4769 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c81cebb9-bdc4-48e9-aa6e-379e9a306c33-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 10:04:46 crc kubenswrapper[4769]: I1125 10:04:46.456464 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-995fv\" (UniqueName: \"kubernetes.io/projected/ab96ca8c-d498-4f2f-a19c-07cbe5356c21-kube-api-access-995fv\") on node \"crc\" DevicePath \"\"" Nov 25 10:04:46 crc kubenswrapper[4769]: I1125 10:04:46.493207 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 25 10:04:46 crc kubenswrapper[4769]: I1125 10:04:46.716869 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-jldwl"] Nov 25 10:04:46 crc kubenswrapper[4769]: I1125 10:04:46.731338 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-jldwl"] Nov 25 10:04:46 crc kubenswrapper[4769]: I1125 10:04:46.749404 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-5dk5k"] Nov 25 10:04:46 crc kubenswrapper[4769]: I1125 10:04:46.756718 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-5dk5k"] Nov 
25 10:04:46 crc kubenswrapper[4769]: I1125 10:04:46.952171 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-2qznl"] Nov 25 10:04:48 crc kubenswrapper[4769]: I1125 10:04:48.278816 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ab96ca8c-d498-4f2f-a19c-07cbe5356c21" path="/var/lib/kubelet/pods/ab96ca8c-d498-4f2f-a19c-07cbe5356c21/volumes" Nov 25 10:04:48 crc kubenswrapper[4769]: I1125 10:04:48.281299 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c81cebb9-bdc4-48e9-aa6e-379e9a306c33" path="/var/lib/kubelet/pods/c81cebb9-bdc4-48e9-aa6e-379e9a306c33/volumes" Nov 25 10:04:48 crc kubenswrapper[4769]: I1125 10:04:48.379051 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-2qznl" event={"ID":"2d073f77-c21a-401e-8fd2-e6f08d2bcf17","Type":"ContainerStarted","Data":"428b9b3f4a7313004bd98d1440cec832bd87d1a11fbe32fd72c90e2da2d74f14"} Nov 25 10:04:48 crc kubenswrapper[4769]: I1125 10:04:48.382898 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-859655bd7d-v8qvf" event={"ID":"18f2ba17-1349-4c25-9585-d1f119a6988b","Type":"ContainerStarted","Data":"82be6afd04910427eec4ed1c856a393f61922b1143670b1db955a3ebb1377926"} Nov 25 10:04:48 crc kubenswrapper[4769]: I1125 10:04:48.385835 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-6cxlg" event={"ID":"f1ce4931-1e74-4600-87e2-5fb7c899376a","Type":"ContainerStarted","Data":"a10b3e98aab1d5a2cb6cc35d07c90780303ef58acf939552a0a4441043f9c7a5"} Nov 25 10:04:48 crc kubenswrapper[4769]: I1125 10:04:48.386154 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-57d769cc4f-6cxlg" Nov 25 10:04:48 crc kubenswrapper[4769]: I1125 10:04:48.387255 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"949195eb-241c-44a0-a5ba-21fadd596967","Type":"ContainerStarted","Data":"a65f0fcb2c90ebee1b83e810b527cfebbddbe2b445803c876e684b0442946ba0"} Nov 25 10:04:48 crc kubenswrapper[4769]: I1125 10:04:48.388875 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"8c646d86-e48c-4d40-8370-1736c484875f","Type":"ContainerStarted","Data":"75c4aef92a2db4c9ca25a008bc05580872558f0ca96c5917488022151821c07b"} Nov 25 10:04:48 crc kubenswrapper[4769]: I1125 10:04:48.389646 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Nov 25 10:04:48 crc kubenswrapper[4769]: I1125 10:04:48.417208 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-859655bd7d-v8qvf" podStartSLOduration=17.417186401 podStartE2EDuration="17.417186401s" podCreationTimestamp="2025-11-25 10:04:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:04:48.407203641 +0000 UTC m=+1236.992175954" watchObservedRunningTime="2025-11-25 10:04:48.417186401 +0000 UTC m=+1237.002158714" Nov 25 10:04:48 crc kubenswrapper[4769]: I1125 10:04:48.434031 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-57d769cc4f-6cxlg" podStartSLOduration=5.907036808 podStartE2EDuration="25.434002898s" podCreationTimestamp="2025-11-25 10:04:23 +0000 UTC" firstStartedPulling="2025-11-25 10:04:24.758745048 +0000 UTC m=+1213.343717361" lastFinishedPulling="2025-11-25 10:04:44.285711128 +0000 
UTC m=+1232.870683451" observedRunningTime="2025-11-25 10:04:48.42984188 +0000 UTC m=+1237.014814193" watchObservedRunningTime="2025-11-25 10:04:48.434002898 +0000 UTC m=+1237.018975211"
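Two consistency checks on the pod_startup_latency_tracker entries here: podStartE2EDuration equals watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration equals that E2E figure minus the image-pull window (lastFinishedPulling minus firstStartedPulling); the m=+N suffixes are Go monotonic-clock offsets from kubelet start, which is why re-derived values can differ from the logged ones by a few tens of nanoseconds. A minimal sketch re-deriving the dnsmasq-dns-57d769cc4f-6cxlg figures from the timestamps logged above (the layout string is an assumption chosen to match how Go renders these times):

package main

import (
	"fmt"
	"time"
)

// Re-derives podStartE2EDuration (~25.434s) and podStartSLOduration (~5.907s)
// for dnsmasq-dns-57d769cc4f-6cxlg from the timestamps in the log entry above.
func main() {
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	parse := func(s string) time.Time {
		t, err := time.Parse(layout, s)
		if err != nil {
			panic(err)
		}
		return t
	}
	created := parse("2025-11-25 10:04:23 +0000 UTC")
	firstPull := parse("2025-11-25 10:04:24.758745048 +0000 UTC")
	lastPull := parse("2025-11-25 10:04:44.285711128 +0000 UTC")
	observed := parse("2025-11-25 10:04:48.434002898 +0000 UTC")

	e2e := observed.Sub(created)         // podStartE2EDuration
	slo := e2e - lastPull.Sub(firstPull) // E2E minus the image-pull window
	fmt.Println(e2e, slo)                // 25.434002898s 5.907036818s (logged: 5.907036808)
}

The memcached-0 entry that follows checks out the same way: 21.46407167s minus a 3.59564s pull window gives the logged 17.8684s SLO duration.

Nov 25 10:04:48 crc kubenswrapper[4769]: I1125 10:04:48.464095 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=17.868431819 podStartE2EDuration="21.46407167s" podCreationTimestamp="2025-11-25 10:04:27 +0000 UTC" firstStartedPulling="2025-11-25 10:04:44.021013981 +0000 UTC m=+1232.605986284" lastFinishedPulling="2025-11-25 10:04:47.616653822 +0000 UTC m=+1236.201626135" observedRunningTime="2025-11-25 10:04:48.455004575 +0000 UTC m=+1237.039976888" watchObservedRunningTime="2025-11-25 10:04:48.46407167 +0000 UTC m=+1237.049043983" Nov 25 10:04:51 crc kubenswrapper[4769]: I1125 10:04:51.775016 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-859655bd7d-v8qvf" Nov 25 10:04:51 crc kubenswrapper[4769]: I1125 10:04:51.778385 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-859655bd7d-v8qvf" Nov 25 10:04:51 crc kubenswrapper[4769]: I1125 10:04:51.782890 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-859655bd7d-v8qvf" Nov 25 10:04:52 crc kubenswrapper[4769]: I1125 10:04:52.476286 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-859655bd7d-v8qvf" Nov 25 10:04:52 crc kubenswrapper[4769]: I1125 10:04:52.555356 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-5b87c94878-dctn4"] Nov 25 10:04:53 crc kubenswrapper[4769]: I1125 10:04:53.409131 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Nov 25 10:04:54 crc kubenswrapper[4769]: I1125 10:04:54.041182 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-57d769cc4f-6cxlg" Nov 25 10:04:54 crc kubenswrapper[4769]: I1125 10:04:54.106289 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-9vg8m"] Nov 25 10:04:58 crc kubenswrapper[4769]: I1125 10:04:58.888729 4769 util.go:48] "No ready sandbox for pod can be found. 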
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-9vg8m" Nov 25 10:04:58 crc kubenswrapper[4769]: I1125 10:04:58.956653 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/50d6488a-84f8-41e0-b971-49822125edf2-dns-svc\") pod \"50d6488a-84f8-41e0-b971-49822125edf2\" (UID: \"50d6488a-84f8-41e0-b971-49822125edf2\") " Nov 25 10:04:58 crc kubenswrapper[4769]: I1125 10:04:58.956971 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ssrvf\" (UniqueName: \"kubernetes.io/projected/50d6488a-84f8-41e0-b971-49822125edf2-kube-api-access-ssrvf\") pod \"50d6488a-84f8-41e0-b971-49822125edf2\" (UID: \"50d6488a-84f8-41e0-b971-49822125edf2\") " Nov 25 10:04:58 crc kubenswrapper[4769]: I1125 10:04:58.957290 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50d6488a-84f8-41e0-b971-49822125edf2-config\") pod \"50d6488a-84f8-41e0-b971-49822125edf2\" (UID: \"50d6488a-84f8-41e0-b971-49822125edf2\") " Nov 25 10:04:58 crc kubenswrapper[4769]: I1125 10:04:58.975911 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/50d6488a-84f8-41e0-b971-49822125edf2-kube-api-access-ssrvf" (OuterVolumeSpecName: "kube-api-access-ssrvf") pod "50d6488a-84f8-41e0-b971-49822125edf2" (UID: "50d6488a-84f8-41e0-b971-49822125edf2"). InnerVolumeSpecName "kube-api-access-ssrvf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:04:58 crc kubenswrapper[4769]: I1125 10:04:58.992715 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/50d6488a-84f8-41e0-b971-49822125edf2-config" (OuterVolumeSpecName: "config") pod "50d6488a-84f8-41e0-b971-49822125edf2" (UID: "50d6488a-84f8-41e0-b971-49822125edf2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:04:58 crc kubenswrapper[4769]: I1125 10:04:58.998657 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/50d6488a-84f8-41e0-b971-49822125edf2-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "50d6488a-84f8-41e0-b971-49822125edf2" (UID: "50d6488a-84f8-41e0-b971-49822125edf2"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:04:59 crc kubenswrapper[4769]: I1125 10:04:59.059666 4769 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/50d6488a-84f8-41e0-b971-49822125edf2-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 10:04:59 crc kubenswrapper[4769]: I1125 10:04:59.059707 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ssrvf\" (UniqueName: \"kubernetes.io/projected/50d6488a-84f8-41e0-b971-49822125edf2-kube-api-access-ssrvf\") on node \"crc\" DevicePath \"\"" Nov 25 10:04:59 crc kubenswrapper[4769]: I1125 10:04:59.059719 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50d6488a-84f8-41e0-b971-49822125edf2-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:04:59 crc kubenswrapper[4769]: I1125 10:04:59.575728 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-9vg8m" event={"ID":"50d6488a-84f8-41e0-b971-49822125edf2","Type":"ContainerDied","Data":"cf688d93473a051cc9829bf7a796a24d3d750124eeabb5b84898936a34a74818"} Nov 25 10:04:59 crc kubenswrapper[4769]: I1125 10:04:59.575822 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-9vg8m" Nov 25 10:04:59 crc kubenswrapper[4769]: I1125 10:04:59.649510 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-9vg8m"] Nov 25 10:04:59 crc kubenswrapper[4769]: I1125 10:04:59.660296 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-9vg8m"] Nov 25 10:05:00 crc kubenswrapper[4769]: I1125 10:05:00.265711 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="50d6488a-84f8-41e0-b971-49822125edf2" path="/var/lib/kubelet/pods/50d6488a-84f8-41e0-b971-49822125edf2/volumes" Nov 25 10:05:00 crc kubenswrapper[4769]: I1125 10:05:00.723972 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7cb5889db5-72c4b"] Nov 25 10:05:00 crc kubenswrapper[4769]: I1125 10:05:00.726002 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7cb5889db5-72c4b" Nov 25 10:05:00 crc kubenswrapper[4769]: I1125 10:05:00.754099 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7cb5889db5-72c4b"] Nov 25 10:05:00 crc kubenswrapper[4769]: I1125 10:05:00.828295 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bb46db53-8238-4089-9d9c-e4e70675ba4d-config\") pod \"dnsmasq-dns-7cb5889db5-72c4b\" (UID: \"bb46db53-8238-4089-9d9c-e4e70675ba4d\") " pod="openstack/dnsmasq-dns-7cb5889db5-72c4b" Nov 25 10:05:00 crc kubenswrapper[4769]: I1125 10:05:00.828428 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bb46db53-8238-4089-9d9c-e4e70675ba4d-dns-svc\") pod \"dnsmasq-dns-7cb5889db5-72c4b\" (UID: \"bb46db53-8238-4089-9d9c-e4e70675ba4d\") " pod="openstack/dnsmasq-dns-7cb5889db5-72c4b" Nov 25 10:05:00 crc kubenswrapper[4769]: I1125 10:05:00.828483 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sn4gd\" (UniqueName: \"kubernetes.io/projected/bb46db53-8238-4089-9d9c-e4e70675ba4d-kube-api-access-sn4gd\") pod \"dnsmasq-dns-7cb5889db5-72c4b\" (UID: \"bb46db53-8238-4089-9d9c-e4e70675ba4d\") " pod="openstack/dnsmasq-dns-7cb5889db5-72c4b" Nov 25 10:05:00 crc kubenswrapper[4769]: I1125 10:05:00.938854 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bb46db53-8238-4089-9d9c-e4e70675ba4d-dns-svc\") pod \"dnsmasq-dns-7cb5889db5-72c4b\" (UID: \"bb46db53-8238-4089-9d9c-e4e70675ba4d\") " pod="openstack/dnsmasq-dns-7cb5889db5-72c4b" Nov 25 10:05:00 crc kubenswrapper[4769]: I1125 10:05:00.938996 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sn4gd\" (UniqueName: \"kubernetes.io/projected/bb46db53-8238-4089-9d9c-e4e70675ba4d-kube-api-access-sn4gd\") pod \"dnsmasq-dns-7cb5889db5-72c4b\" (UID: \"bb46db53-8238-4089-9d9c-e4e70675ba4d\") " pod="openstack/dnsmasq-dns-7cb5889db5-72c4b" Nov 25 10:05:00 crc kubenswrapper[4769]: I1125 10:05:00.939102 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bb46db53-8238-4089-9d9c-e4e70675ba4d-config\") pod \"dnsmasq-dns-7cb5889db5-72c4b\" (UID: \"bb46db53-8238-4089-9d9c-e4e70675ba4d\") " pod="openstack/dnsmasq-dns-7cb5889db5-72c4b" Nov 25 10:05:00 crc kubenswrapper[4769]: I1125 10:05:00.941245 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bb46db53-8238-4089-9d9c-e4e70675ba4d-dns-svc\") pod \"dnsmasq-dns-7cb5889db5-72c4b\" (UID: \"bb46db53-8238-4089-9d9c-e4e70675ba4d\") " pod="openstack/dnsmasq-dns-7cb5889db5-72c4b" Nov 25 10:05:00 crc kubenswrapper[4769]: I1125 10:05:00.942439 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bb46db53-8238-4089-9d9c-e4e70675ba4d-config\") pod \"dnsmasq-dns-7cb5889db5-72c4b\" (UID: \"bb46db53-8238-4089-9d9c-e4e70675ba4d\") " pod="openstack/dnsmasq-dns-7cb5889db5-72c4b" Nov 25 10:05:00 crc kubenswrapper[4769]: I1125 10:05:00.980114 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sn4gd\" (UniqueName: 
\"kubernetes.io/projected/bb46db53-8238-4089-9d9c-e4e70675ba4d-kube-api-access-sn4gd\") pod \"dnsmasq-dns-7cb5889db5-72c4b\" (UID: \"bb46db53-8238-4089-9d9c-e4e70675ba4d\") " pod="openstack/dnsmasq-dns-7cb5889db5-72c4b" Nov 25 10:05:01 crc kubenswrapper[4769]: I1125 10:05:01.062209 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7cb5889db5-72c4b" Nov 25 10:05:01 crc kubenswrapper[4769]: E1125 10:05:01.307287 4769 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0" Nov 25 10:05:01 crc kubenswrapper[4769]: E1125 10:05:01.307377 4769 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0" Nov 25 10:05:01 crc kubenswrapper[4769]: E1125 10:05:01.307535 4769 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-state-metrics,Image:registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0,Command:[],Args:[--resources=pods --namespaces=openstack],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http-metrics,HostPort:0,ContainerPort:8080,Protocol:TCP,HostIP:,},ContainerPort{Name:telemetry,HostPort:0,ContainerPort:8081,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-lskqw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/livez,Port:{0 8080 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod kube-state-metrics-0_openstack(75a23be3-741b-4e4e-a168-4cc37f54073d): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 10:05:01 crc kubenswrapper[4769]: E1125 10:05:01.308678 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with ErrImagePull: \"rpc error: code = Canceled desc = 
copying system image from manifest list: copying config: context canceled\"" pod="openstack/kube-state-metrics-0" podUID="75a23be3-741b-4e4e-a168-4cc37f54073d" Nov 25 10:05:01 crc kubenswrapper[4769]: E1125 10:05:01.631425 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0\\\"\"" pod="openstack/kube-state-metrics-0" podUID="75a23be3-741b-4e4e-a168-4cc37f54073d" Nov 25 10:05:01 crc kubenswrapper[4769]: I1125 10:05:01.927435 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Nov 25 10:05:01 crc kubenswrapper[4769]: I1125 10:05:01.936345 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Nov 25 10:05:01 crc kubenswrapper[4769]: I1125 10:05:01.943646 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Nov 25 10:05:01 crc kubenswrapper[4769]: I1125 10:05:01.943758 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-vjwbl" Nov 25 10:05:01 crc kubenswrapper[4769]: I1125 10:05:01.943914 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Nov 25 10:05:01 crc kubenswrapper[4769]: I1125 10:05:01.944922 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Nov 25 10:05:01 crc kubenswrapper[4769]: I1125 10:05:01.952580 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.018621 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7cb5889db5-72c4b"] Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.073764 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xfftm\" (UniqueName: \"kubernetes.io/projected/f7987cb4-5485-438f-bc01-c69e509b81a6-kube-api-access-xfftm\") pod \"swift-storage-0\" (UID: \"f7987cb4-5485-438f-bc01-c69e509b81a6\") " pod="openstack/swift-storage-0" Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.073837 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/f7987cb4-5485-438f-bc01-c69e509b81a6-cache\") pod \"swift-storage-0\" (UID: \"f7987cb4-5485-438f-bc01-c69e509b81a6\") " pod="openstack/swift-storage-0" Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.073871 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/f7987cb4-5485-438f-bc01-c69e509b81a6-etc-swift\") pod \"swift-storage-0\" (UID: \"f7987cb4-5485-438f-bc01-c69e509b81a6\") " pod="openstack/swift-storage-0" Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.073898 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"swift-storage-0\" (UID: \"f7987cb4-5485-438f-bc01-c69e509b81a6\") " pod="openstack/swift-storage-0" Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.074246 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: 
\"kubernetes.io/empty-dir/f7987cb4-5485-438f-bc01-c69e509b81a6-lock\") pod \"swift-storage-0\" (UID: \"f7987cb4-5485-438f-bc01-c69e509b81a6\") " pod="openstack/swift-storage-0" Nov 25 10:05:02 crc kubenswrapper[4769]: W1125 10:05:02.080232 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbb46db53_8238_4089_9d9c_e4e70675ba4d.slice/crio-8e167e393f2f86d3aa891804098ebbebf923154a4c1f6541680ce354f19d7bcb WatchSource:0}: Error finding container 8e167e393f2f86d3aa891804098ebbebf923154a4c1f6541680ce354f19d7bcb: Status 404 returned error can't find the container with id 8e167e393f2f86d3aa891804098ebbebf923154a4c1f6541680ce354f19d7bcb Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.177198 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/f7987cb4-5485-438f-bc01-c69e509b81a6-lock\") pod \"swift-storage-0\" (UID: \"f7987cb4-5485-438f-bc01-c69e509b81a6\") " pod="openstack/swift-storage-0" Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.177841 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xfftm\" (UniqueName: \"kubernetes.io/projected/f7987cb4-5485-438f-bc01-c69e509b81a6-kube-api-access-xfftm\") pod \"swift-storage-0\" (UID: \"f7987cb4-5485-438f-bc01-c69e509b81a6\") " pod="openstack/swift-storage-0" Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.177868 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/f7987cb4-5485-438f-bc01-c69e509b81a6-cache\") pod \"swift-storage-0\" (UID: \"f7987cb4-5485-438f-bc01-c69e509b81a6\") " pod="openstack/swift-storage-0" Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.177895 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/f7987cb4-5485-438f-bc01-c69e509b81a6-etc-swift\") pod \"swift-storage-0\" (UID: \"f7987cb4-5485-438f-bc01-c69e509b81a6\") " pod="openstack/swift-storage-0" Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.177945 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"swift-storage-0\" (UID: \"f7987cb4-5485-438f-bc01-c69e509b81a6\") " pod="openstack/swift-storage-0" Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.178349 4769 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"swift-storage-0\" (UID: \"f7987cb4-5485-438f-bc01-c69e509b81a6\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/swift-storage-0" Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.181566 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/f7987cb4-5485-438f-bc01-c69e509b81a6-lock\") pod \"swift-storage-0\" (UID: \"f7987cb4-5485-438f-bc01-c69e509b81a6\") " pod="openstack/swift-storage-0" Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.181814 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/f7987cb4-5485-438f-bc01-c69e509b81a6-cache\") pod \"swift-storage-0\" (UID: \"f7987cb4-5485-438f-bc01-c69e509b81a6\") " pod="openstack/swift-storage-0" Nov 25 10:05:02 crc 
kubenswrapper[4769]: E1125 10:05:02.181908 4769 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 25 10:05:02 crc kubenswrapper[4769]: E1125 10:05:02.181927 4769 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 25 10:05:02 crc kubenswrapper[4769]: E1125 10:05:02.181989 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/f7987cb4-5485-438f-bc01-c69e509b81a6-etc-swift podName:f7987cb4-5485-438f-bc01-c69e509b81a6 nodeName:}" failed. No retries permitted until 2025-11-25 10:05:02.681953873 +0000 UTC m=+1251.266926176 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/f7987cb4-5485-438f-bc01-c69e509b81a6-etc-swift") pod "swift-storage-0" (UID: "f7987cb4-5485-438f-bc01-c69e509b81a6") : configmap "swift-ring-files" not found
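The failure above is the start of a dependency chain: etc-swift is a projected volume sourced from the swift-ring-files ConfigMap, which does not exist until the swift-ring-rebalance job (scheduled just below) publishes it, so the kubelet can only retry the mount. A minimal sketch of waiting out the same dependency from a client, assuming in-cluster credentials; this is illustrative only, not how the kubelet itself resolves it:

package main

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// Polls until the ConfigMap the projected volume depends on exists.
func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	err = wait.PollUntilContextTimeout(context.Background(), 2*time.Second, 5*time.Minute, true,
		func(ctx context.Context) (bool, error) {
			_, err := cs.CoreV1().ConfigMaps("openstack").Get(ctx, "swift-ring-files", metav1.GetOptions{})
			return err == nil, nil // treat "not found" as "keep waiting"
		})
	if err != nil {
		panic(err)
	}
	fmt.Println(`configmap "swift-ring-files" exists; etc-swift can now mount`)
}

Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.203353 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xfftm\" (UniqueName: \"kubernetes.io/projected/f7987cb4-5485-438f-bc01-c69e509b81a6-kube-api-access-xfftm\") pod \"swift-storage-0\" (UID: \"f7987cb4-5485-438f-bc01-c69e509b81a6\") " pod="openstack/swift-storage-0" Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.212165 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"swift-storage-0\" (UID: \"f7987cb4-5485-438f-bc01-c69e509b81a6\") " pod="openstack/swift-storage-0" Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.585771 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-mq88x"] Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.597746 4769 util.go:30] "No sandbox for pod can be found. 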
Need to start a new one" pod="openstack/swift-ring-rebalance-mq88x" Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.604821 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.605277 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.605663 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.613206 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-mq88x"] Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.666917 4769 generic.go:334] "Generic (PLEG): container finished" podID="bb46db53-8238-4089-9d9c-e4e70675ba4d" containerID="eed2ce8ef5b37720cd1ddd1769e6c46374cc165f6e43cb5927140c0586613841" exitCode=0 Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.667041 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7cb5889db5-72c4b" event={"ID":"bb46db53-8238-4089-9d9c-e4e70675ba4d","Type":"ContainerDied","Data":"eed2ce8ef5b37720cd1ddd1769e6c46374cc165f6e43cb5927140c0586613841"} Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.667073 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7cb5889db5-72c4b" event={"ID":"bb46db53-8238-4089-9d9c-e4e70675ba4d","Type":"ContainerStarted","Data":"8e167e393f2f86d3aa891804098ebbebf923154a4c1f6541680ce354f19d7bcb"} Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.671241 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-wrf89" event={"ID":"cdfffb8b-45ab-4bd4-a29a-b6ad363d9d59","Type":"ContainerStarted","Data":"2fa847f96aea2525d194b4c05183a5eb5f54059727777ec81df3ad81bcb8cb62"} Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.673927 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"949195eb-241c-44a0-a5ba-21fadd596967","Type":"ContainerStarted","Data":"939cc7eb7c522062438a51fb545815b6dffcf284f85e6f3eaf2957ee209c82f1"} Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.695049 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-tnz6t" event={"ID":"a94c59b5-e672-4d10-a090-89fa82afc1f3","Type":"ContainerStarted","Data":"51308322273edb2d88aa272aa4a6ee1f1c666415d157713b49d104f09a0ca863"} Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.695094 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-tnz6t" Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.695252 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6e5767ed-9248-4df0-945f-817154feb33c-scripts\") pod \"swift-ring-rebalance-mq88x\" (UID: \"6e5767ed-9248-4df0-945f-817154feb33c\") " pod="openstack/swift-ring-rebalance-mq88x" Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.695309 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/6e5767ed-9248-4df0-945f-817154feb33c-ring-data-devices\") pod \"swift-ring-rebalance-mq88x\" (UID: \"6e5767ed-9248-4df0-945f-817154feb33c\") " pod="openstack/swift-ring-rebalance-mq88x" Nov 25 10:05:02 
crc kubenswrapper[4769]: I1125 10:05:02.695332 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e5767ed-9248-4df0-945f-817154feb33c-combined-ca-bundle\") pod \"swift-ring-rebalance-mq88x\" (UID: \"6e5767ed-9248-4df0-945f-817154feb33c\") " pod="openstack/swift-ring-rebalance-mq88x" Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.695359 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/6e5767ed-9248-4df0-945f-817154feb33c-dispersionconf\") pod \"swift-ring-rebalance-mq88x\" (UID: \"6e5767ed-9248-4df0-945f-817154feb33c\") " pod="openstack/swift-ring-rebalance-mq88x" Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.695386 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r8fvw\" (UniqueName: \"kubernetes.io/projected/6e5767ed-9248-4df0-945f-817154feb33c-kube-api-access-r8fvw\") pod \"swift-ring-rebalance-mq88x\" (UID: \"6e5767ed-9248-4df0-945f-817154feb33c\") " pod="openstack/swift-ring-rebalance-mq88x" Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.695412 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/6e5767ed-9248-4df0-945f-817154feb33c-swiftconf\") pod \"swift-ring-rebalance-mq88x\" (UID: \"6e5767ed-9248-4df0-945f-817154feb33c\") " pod="openstack/swift-ring-rebalance-mq88x" Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.695430 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/6e5767ed-9248-4df0-945f-817154feb33c-etc-swift\") pod \"swift-ring-rebalance-mq88x\" (UID: \"6e5767ed-9248-4df0-945f-817154feb33c\") " pod="openstack/swift-ring-rebalance-mq88x" Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.695500 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/f7987cb4-5485-438f-bc01-c69e509b81a6-etc-swift\") pod \"swift-storage-0\" (UID: \"f7987cb4-5485-438f-bc01-c69e509b81a6\") " pod="openstack/swift-storage-0" Nov 25 10:05:02 crc kubenswrapper[4769]: E1125 10:05:02.695684 4769 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 25 10:05:02 crc kubenswrapper[4769]: E1125 10:05:02.695699 4769 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 25 10:05:02 crc kubenswrapper[4769]: E1125 10:05:02.695747 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/f7987cb4-5485-438f-bc01-c69e509b81a6-etc-swift podName:f7987cb4-5485-438f-bc01-c69e509b81a6 nodeName:}" failed. No retries permitted until 2025-11-25 10:05:03.695731001 +0000 UTC m=+1252.280703314 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/f7987cb4-5485-438f-bc01-c69e509b81a6-etc-swift") pod "swift-storage-0" (UID: "f7987cb4-5485-438f-bc01-c69e509b81a6") : configmap "swift-ring-files" not found Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.715536 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"52438bf8-8800-4078-bc88-63033a83dd2e","Type":"ContainerStarted","Data":"30b1f98a30a0551a689c2fae8fd26909a8d4c10090c055653291445576c5ebd5"} Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.724370 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-wrf89" podStartSLOduration=19.184603857 podStartE2EDuration="32.724348525s" podCreationTimestamp="2025-11-25 10:04:30 +0000 UTC" firstStartedPulling="2025-11-25 10:04:46.010876044 +0000 UTC m=+1234.595848367" lastFinishedPulling="2025-11-25 10:04:59.550620722 +0000 UTC m=+1248.135593035" observedRunningTime="2025-11-25 10:05:02.721203573 +0000 UTC m=+1251.306175886" watchObservedRunningTime="2025-11-25 10:05:02.724348525 +0000 UTC m=+1251.309320838" Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.740259 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"8e360825-f56f-4e69-9e17-c9e78f295267","Type":"ContainerStarted","Data":"95053adeb8346fcddb8f40fb5fe4b54624f8a3af8a52e5d18ee8e610ccd62d3e"} Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.767126 4769 generic.go:334] "Generic (PLEG): container finished" podID="2d073f77-c21a-401e-8fd2-e6f08d2bcf17" containerID="fa2f9c608ddf05b6cb374fe628cabeb190c9abfe6762526dda9aa0d30ab37d63" exitCode=0 Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.767222 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-2qznl" event={"ID":"2d073f77-c21a-401e-8fd2-e6f08d2bcf17","Type":"ContainerDied","Data":"fa2f9c608ddf05b6cb374fe628cabeb190c9abfe6762526dda9aa0d30ab37d63"} Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.774173 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"80bf45ba-a6bb-4e24-86c8-1234bb9f61f0","Type":"ContainerStarted","Data":"98ce6a1960d54d400003e15f6530a2bbe11c15821fb21d7fbbe037fa2d000101"} Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.800597 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6e5767ed-9248-4df0-945f-817154feb33c-scripts\") pod \"swift-ring-rebalance-mq88x\" (UID: \"6e5767ed-9248-4df0-945f-817154feb33c\") " pod="openstack/swift-ring-rebalance-mq88x" Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.800702 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/6e5767ed-9248-4df0-945f-817154feb33c-ring-data-devices\") pod \"swift-ring-rebalance-mq88x\" (UID: \"6e5767ed-9248-4df0-945f-817154feb33c\") " pod="openstack/swift-ring-rebalance-mq88x" Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.800762 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e5767ed-9248-4df0-945f-817154feb33c-combined-ca-bundle\") pod \"swift-ring-rebalance-mq88x\" (UID: \"6e5767ed-9248-4df0-945f-817154feb33c\") " pod="openstack/swift-ring-rebalance-mq88x" Nov 25 
10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.800795 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/6e5767ed-9248-4df0-945f-817154feb33c-dispersionconf\") pod \"swift-ring-rebalance-mq88x\" (UID: \"6e5767ed-9248-4df0-945f-817154feb33c\") " pod="openstack/swift-ring-rebalance-mq88x" Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.801469 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r8fvw\" (UniqueName: \"kubernetes.io/projected/6e5767ed-9248-4df0-945f-817154feb33c-kube-api-access-r8fvw\") pod \"swift-ring-rebalance-mq88x\" (UID: \"6e5767ed-9248-4df0-945f-817154feb33c\") " pod="openstack/swift-ring-rebalance-mq88x" Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.801557 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/6e5767ed-9248-4df0-945f-817154feb33c-swiftconf\") pod \"swift-ring-rebalance-mq88x\" (UID: \"6e5767ed-9248-4df0-945f-817154feb33c\") " pod="openstack/swift-ring-rebalance-mq88x" Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.801605 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/6e5767ed-9248-4df0-945f-817154feb33c-etc-swift\") pod \"swift-ring-rebalance-mq88x\" (UID: \"6e5767ed-9248-4df0-945f-817154feb33c\") " pod="openstack/swift-ring-rebalance-mq88x" Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.802603 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/6e5767ed-9248-4df0-945f-817154feb33c-etc-swift\") pod \"swift-ring-rebalance-mq88x\" (UID: \"6e5767ed-9248-4df0-945f-817154feb33c\") " pod="openstack/swift-ring-rebalance-mq88x" Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.807308 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/6e5767ed-9248-4df0-945f-817154feb33c-ring-data-devices\") pod \"swift-ring-rebalance-mq88x\" (UID: \"6e5767ed-9248-4df0-945f-817154feb33c\") " pod="openstack/swift-ring-rebalance-mq88x" Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.807548 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6e5767ed-9248-4df0-945f-817154feb33c-scripts\") pod \"swift-ring-rebalance-mq88x\" (UID: \"6e5767ed-9248-4df0-945f-817154feb33c\") " pod="openstack/swift-ring-rebalance-mq88x" Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.808540 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/6e5767ed-9248-4df0-945f-817154feb33c-swiftconf\") pod \"swift-ring-rebalance-mq88x\" (UID: \"6e5767ed-9248-4df0-945f-817154feb33c\") " pod="openstack/swift-ring-rebalance-mq88x" Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.816314 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-tnz6t" podStartSLOduration=15.597173754 podStartE2EDuration="29.816276447s" podCreationTimestamp="2025-11-25 10:04:33 +0000 UTC" firstStartedPulling="2025-11-25 10:04:46.001315805 +0000 UTC m=+1234.586288118" lastFinishedPulling="2025-11-25 10:05:00.220418498 +0000 UTC m=+1248.805390811" observedRunningTime="2025-11-25 10:05:02.757429296 +0000 UTC m=+1251.342401609" 
watchObservedRunningTime="2025-11-25 10:05:02.816276447 +0000 UTC m=+1251.401248760" Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.825578 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/6e5767ed-9248-4df0-945f-817154feb33c-dispersionconf\") pod \"swift-ring-rebalance-mq88x\" (UID: \"6e5767ed-9248-4df0-945f-817154feb33c\") " pod="openstack/swift-ring-rebalance-mq88x" Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.828381 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r8fvw\" (UniqueName: \"kubernetes.io/projected/6e5767ed-9248-4df0-945f-817154feb33c-kube-api-access-r8fvw\") pod \"swift-ring-rebalance-mq88x\" (UID: \"6e5767ed-9248-4df0-945f-817154feb33c\") " pod="openstack/swift-ring-rebalance-mq88x" Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.829009 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e5767ed-9248-4df0-945f-817154feb33c-combined-ca-bundle\") pod \"swift-ring-rebalance-mq88x\" (UID: \"6e5767ed-9248-4df0-945f-817154feb33c\") " pod="openstack/swift-ring-rebalance-mq88x" Nov 25 10:05:02 crc kubenswrapper[4769]: I1125 10:05:02.931847 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-mq88x" Nov 25 10:05:03 crc kubenswrapper[4769]: I1125 10:05:03.581105 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-mq88x"] Nov 25 10:05:03 crc kubenswrapper[4769]: I1125 10:05:03.696254 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/f7987cb4-5485-438f-bc01-c69e509b81a6-etc-swift\") pod \"swift-storage-0\" (UID: \"f7987cb4-5485-438f-bc01-c69e509b81a6\") " pod="openstack/swift-storage-0" Nov 25 10:05:03 crc kubenswrapper[4769]: E1125 10:05:03.696441 4769 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 25 10:05:03 crc kubenswrapper[4769]: E1125 10:05:03.696573 4769 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 25 10:05:03 crc kubenswrapper[4769]: E1125 10:05:03.696648 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/f7987cb4-5485-438f-bc01-c69e509b81a6-etc-swift podName:f7987cb4-5485-438f-bc01-c69e509b81a6 nodeName:}" failed. No retries permitted until 2025-11-25 10:05:05.696621342 +0000 UTC m=+1254.281593655 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/f7987cb4-5485-438f-bc01-c69e509b81a6-etc-swift") pod "swift-storage-0" (UID: "f7987cb4-5485-438f-bc01-c69e509b81a6") : configmap "swift-ring-files" not found Nov 25 10:05:03 crc kubenswrapper[4769]: I1125 10:05:03.815495 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7cb5889db5-72c4b" event={"ID":"bb46db53-8238-4089-9d9c-e4e70675ba4d","Type":"ContainerStarted","Data":"c03d7c62951bc39dc0c31a60b12ee7e850873b45ef6a1b0b7c6eadc1199421e6"} Nov 25 10:05:03 crc kubenswrapper[4769]: I1125 10:05:03.817481 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7cb5889db5-72c4b" Nov 25 10:05:03 crc kubenswrapper[4769]: I1125 10:05:03.838001 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-mq88x" event={"ID":"6e5767ed-9248-4df0-945f-817154feb33c","Type":"ContainerStarted","Data":"3e843af8a46c232fc596586728d57b57b32faf3683eca0ebbfca9eb3b53c0912"} Nov 25 10:05:03 crc kubenswrapper[4769]: I1125 10:05:03.844553 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-2qznl" event={"ID":"2d073f77-c21a-401e-8fd2-e6f08d2bcf17","Type":"ContainerStarted","Data":"d210153b14b6753513ecb2762d4bbd050babffaa86674aef8a08655864f48d07"} Nov 25 10:05:03 crc kubenswrapper[4769]: I1125 10:05:03.881612 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7cb5889db5-72c4b" podStartSLOduration=3.8815793039999997 podStartE2EDuration="3.881579304s" podCreationTimestamp="2025-11-25 10:05:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:05:03.867135498 +0000 UTC m=+1252.452107801" watchObservedRunningTime="2025-11-25 10:05:03.881579304 +0000 UTC m=+1252.466551617" Nov 25 10:05:04 crc kubenswrapper[4769]: I1125 10:05:04.860244 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7","Type":"ContainerStarted","Data":"577c38364846cc0e6bff04d1a0928fad6b76b4becbf8bafa72df7fffac346f1f"} Nov 25 10:05:04 crc kubenswrapper[4769]: I1125 10:05:04.866156 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-2qznl" event={"ID":"2d073f77-c21a-401e-8fd2-e6f08d2bcf17","Type":"ContainerStarted","Data":"15ac2817ef721c599fbe4b627508fd51502e40f79cbd88121294dd2483b04c96"} Nov 25 10:05:04 crc kubenswrapper[4769]: I1125 10:05:04.866457 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-2qznl" Nov 25 10:05:04 crc kubenswrapper[4769]: I1125 10:05:04.924989 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-2qznl" podStartSLOduration=19.705954785 podStartE2EDuration="31.92494676s" podCreationTimestamp="2025-11-25 10:04:33 +0000 UTC" firstStartedPulling="2025-11-25 10:04:47.593622413 +0000 UTC m=+1236.178594726" lastFinishedPulling="2025-11-25 10:04:59.812614388 +0000 UTC m=+1248.397586701" observedRunningTime="2025-11-25 10:05:04.917149367 +0000 UTC m=+1253.502121730" watchObservedRunningTime="2025-11-25 10:05:04.92494676 +0000 UTC m=+1253.509919073" Nov 25 10:05:05 crc kubenswrapper[4769]: I1125 10:05:05.752528 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: 
\"kubernetes.io/projected/f7987cb4-5485-438f-bc01-c69e509b81a6-etc-swift\") pod \"swift-storage-0\" (UID: \"f7987cb4-5485-438f-bc01-c69e509b81a6\") " pod="openstack/swift-storage-0" Nov 25 10:05:05 crc kubenswrapper[4769]: E1125 10:05:05.752818 4769 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 25 10:05:05 crc kubenswrapper[4769]: E1125 10:05:05.752859 4769 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 25 10:05:05 crc kubenswrapper[4769]: E1125 10:05:05.752942 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/f7987cb4-5485-438f-bc01-c69e509b81a6-etc-swift podName:f7987cb4-5485-438f-bc01-c69e509b81a6 nodeName:}" failed. No retries permitted until 2025-11-25 10:05:09.752915082 +0000 UTC m=+1258.337887395 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/f7987cb4-5485-438f-bc01-c69e509b81a6-etc-swift") pod "swift-storage-0" (UID: "f7987cb4-5485-438f-bc01-c69e509b81a6") : configmap "swift-ring-files" not found Nov 25 10:05:05 crc kubenswrapper[4769]: I1125 10:05:05.882067 4769 generic.go:334] "Generic (PLEG): container finished" podID="52438bf8-8800-4078-bc88-63033a83dd2e" containerID="30b1f98a30a0551a689c2fae8fd26909a8d4c10090c055653291445576c5ebd5" exitCode=0 Nov 25 10:05:05 crc kubenswrapper[4769]: I1125 10:05:05.882331 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"52438bf8-8800-4078-bc88-63033a83dd2e","Type":"ContainerDied","Data":"30b1f98a30a0551a689c2fae8fd26909a8d4c10090c055653291445576c5ebd5"} Nov 25 10:05:05 crc kubenswrapper[4769]: I1125 10:05:05.883472 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-2qznl" Nov 25 10:05:06 crc kubenswrapper[4769]: I1125 10:05:06.897373 4769 generic.go:334] "Generic (PLEG): container finished" podID="8e360825-f56f-4e69-9e17-c9e78f295267" containerID="95053adeb8346fcddb8f40fb5fe4b54624f8a3af8a52e5d18ee8e610ccd62d3e" exitCode=0 Nov 25 10:05:06 crc kubenswrapper[4769]: I1125 10:05:06.897856 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"8e360825-f56f-4e69-9e17-c9e78f295267","Type":"ContainerDied","Data":"95053adeb8346fcddb8f40fb5fe4b54624f8a3af8a52e5d18ee8e610ccd62d3e"} Nov 25 10:05:06 crc kubenswrapper[4769]: I1125 10:05:06.908934 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"52438bf8-8800-4078-bc88-63033a83dd2e","Type":"ContainerStarted","Data":"7699fb5bf48b56d797190b3abce5b038db7167de6ff0dcfdf201bfe7e3850e8e"} Nov 25 10:05:09 crc kubenswrapper[4769]: I1125 10:05:09.767495 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/f7987cb4-5485-438f-bc01-c69e509b81a6-etc-swift\") pod \"swift-storage-0\" (UID: \"f7987cb4-5485-438f-bc01-c69e509b81a6\") " pod="openstack/swift-storage-0" Nov 25 10:05:09 crc kubenswrapper[4769]: E1125 10:05:09.767753 4769 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 25 10:05:09 crc kubenswrapper[4769]: E1125 10:05:09.768057 4769 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 25 10:05:09 
crc kubenswrapper[4769]: E1125 10:05:09.768151 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/f7987cb4-5485-438f-bc01-c69e509b81a6-etc-swift podName:f7987cb4-5485-438f-bc01-c69e509b81a6 nodeName:}" failed. No retries permitted until 2025-11-25 10:05:17.768125099 +0000 UTC m=+1266.353097422 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/f7987cb4-5485-438f-bc01-c69e509b81a6-etc-swift") pod "swift-storage-0" (UID: "f7987cb4-5485-438f-bc01-c69e509b81a6") : configmap "swift-ring-files" not found
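That is the last failed SetUp in this excerpt, and it completes the doubling retry cadence for this one volume across the nestedpendingoperations.go entries above: 500ms, 1s, 2s, 4s, 8s. A minimal sketch of that cadence under stated assumptions: the 500ms starting delay matches the log, while the 2m2s ceiling and the bounded attempt count are assumptions for illustration (the kubelet keeps retrying for as long as the pod exists):

package main

import (
	"errors"
	"fmt"
	"time"
)

// retryWithBackoff reproduces the doubling retry cadence visible above
// (500ms -> 1s -> 2s -> 4s -> 8s). The 2m2s ceiling is an assumed default,
// not something this excerpt shows.
func retryWithBackoff(op func() error, maxAttempts int) error {
	delay := 500 * time.Millisecond
	maxDelay := 2*time.Minute + 2*time.Second
	var err error
	for attempt := 1; attempt <= maxAttempts; attempt++ {
		if err = op(); err == nil {
			return nil
		}
		fmt.Printf("attempt %d failed (%v); durationBeforeRetry %s\n", attempt, err, delay)
		time.Sleep(delay)
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay
		}
	}
	return err
}

func main() {
	// Simulate the mount failing until the fifth try, like the
	// "swift-ring-files" ConfigMap showing up a few seconds late.
	n := 0
	_ = retryWithBackoff(func() error {
		if n++; n < 5 {
			return errors.New(`configmap "swift-ring-files" not found`)
		}
		return nil
	}, 10)
}

Nov 25 10:05:11 crc kubenswrapper[4769]: I1125 10:05:11.077131 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7cb5889db5-72c4b" Nov 25 10:05:11 crc kubenswrapper[4769]: I1125 10:05:11.125916 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=31.600132934 podStartE2EDuration="46.125886146s" podCreationTimestamp="2025-11-25 10:04:25 +0000 UTC" firstStartedPulling="2025-11-25 10:04:45.694640236 +0000 UTC m=+1234.279612559" lastFinishedPulling="2025-11-25 10:05:00.220393458 +0000 UTC m=+1248.805365771" observedRunningTime="2025-11-25 10:05:06.946408604 +0000 UTC m=+1255.531380917" watchObservedRunningTime="2025-11-25 10:05:11.125886146 +0000 UTC m=+1259.710858459" Nov 25 10:05:11 crc kubenswrapper[4769]: I1125 10:05:11.194420 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-6cxlg"] Nov 25 10:05:11 crc kubenswrapper[4769]: I1125 10:05:11.195194 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-57d769cc4f-6cxlg" podUID="f1ce4931-1e74-4600-87e2-5fb7c899376a" containerName="dnsmasq-dns" containerID="cri-o://a10b3e98aab1d5a2cb6cc35d07c90780303ef58acf939552a0a4441043f9c7a5" gracePeriod=10 Nov 25 10:05:12 crc kubenswrapper[4769]: I1125 10:05:12.000562 4769 generic.go:334] "Generic (PLEG): container finished" podID="f1ce4931-1e74-4600-87e2-5fb7c899376a" containerID="a10b3e98aab1d5a2cb6cc35d07c90780303ef58acf939552a0a4441043f9c7a5" exitCode=0 Nov 25 10:05:12 crc kubenswrapper[4769]: I1125 10:05:12.000661 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-6cxlg" event={"ID":"f1ce4931-1e74-4600-87e2-5fb7c899376a","Type":"ContainerDied","Data":"a10b3e98aab1d5a2cb6cc35d07c90780303ef58acf939552a0a4441043f9c7a5"} Nov 25 10:05:13 crc kubenswrapper[4769]: I1125 10:05:13.267714 4769 util.go:48] "No ready sandbox for pod can be found. 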
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-6cxlg" Nov 25 10:05:13 crc kubenswrapper[4769]: I1125 10:05:13.364152 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f1ce4931-1e74-4600-87e2-5fb7c899376a-config\") pod \"f1ce4931-1e74-4600-87e2-5fb7c899376a\" (UID: \"f1ce4931-1e74-4600-87e2-5fb7c899376a\") " Nov 25 10:05:13 crc kubenswrapper[4769]: I1125 10:05:13.364948 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f1ce4931-1e74-4600-87e2-5fb7c899376a-dns-svc\") pod \"f1ce4931-1e74-4600-87e2-5fb7c899376a\" (UID: \"f1ce4931-1e74-4600-87e2-5fb7c899376a\") " Nov 25 10:05:13 crc kubenswrapper[4769]: I1125 10:05:13.365234 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xsq2d\" (UniqueName: \"kubernetes.io/projected/f1ce4931-1e74-4600-87e2-5fb7c899376a-kube-api-access-xsq2d\") pod \"f1ce4931-1e74-4600-87e2-5fb7c899376a\" (UID: \"f1ce4931-1e74-4600-87e2-5fb7c899376a\") " Nov 25 10:05:13 crc kubenswrapper[4769]: I1125 10:05:13.374945 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f1ce4931-1e74-4600-87e2-5fb7c899376a-kube-api-access-xsq2d" (OuterVolumeSpecName: "kube-api-access-xsq2d") pod "f1ce4931-1e74-4600-87e2-5fb7c899376a" (UID: "f1ce4931-1e74-4600-87e2-5fb7c899376a"). InnerVolumeSpecName "kube-api-access-xsq2d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:05:13 crc kubenswrapper[4769]: I1125 10:05:13.425350 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f1ce4931-1e74-4600-87e2-5fb7c899376a-config" (OuterVolumeSpecName: "config") pod "f1ce4931-1e74-4600-87e2-5fb7c899376a" (UID: "f1ce4931-1e74-4600-87e2-5fb7c899376a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:05:13 crc kubenswrapper[4769]: I1125 10:05:13.428377 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f1ce4931-1e74-4600-87e2-5fb7c899376a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f1ce4931-1e74-4600-87e2-5fb7c899376a" (UID: "f1ce4931-1e74-4600-87e2-5fb7c899376a"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:05:13 crc kubenswrapper[4769]: I1125 10:05:13.468513 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f1ce4931-1e74-4600-87e2-5fb7c899376a-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:13 crc kubenswrapper[4769]: I1125 10:05:13.468554 4769 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f1ce4931-1e74-4600-87e2-5fb7c899376a-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:13 crc kubenswrapper[4769]: I1125 10:05:13.468567 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xsq2d\" (UniqueName: \"kubernetes.io/projected/f1ce4931-1e74-4600-87e2-5fb7c899376a-kube-api-access-xsq2d\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:14 crc kubenswrapper[4769]: I1125 10:05:14.032955 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"949195eb-241c-44a0-a5ba-21fadd596967","Type":"ContainerStarted","Data":"0211d2bd35350e9ae6e3838e037eb296d63247109b9dfe918f99e8e353a333fe"} Nov 25 10:05:14 crc kubenswrapper[4769]: I1125 10:05:14.037524 4769 generic.go:334] "Generic (PLEG): container finished" podID="3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7" containerID="577c38364846cc0e6bff04d1a0928fad6b76b4becbf8bafa72df7fffac346f1f" exitCode=0 Nov 25 10:05:14 crc kubenswrapper[4769]: I1125 10:05:14.037629 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7","Type":"ContainerDied","Data":"577c38364846cc0e6bff04d1a0928fad6b76b4becbf8bafa72df7fffac346f1f"} Nov 25 10:05:14 crc kubenswrapper[4769]: I1125 10:05:14.039767 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-mq88x" event={"ID":"6e5767ed-9248-4df0-945f-817154feb33c","Type":"ContainerStarted","Data":"d2adae65ece1d6de11e84374d05a398eccc3c53002ca070a454c688c3c557f0a"} Nov 25 10:05:14 crc kubenswrapper[4769]: I1125 10:05:14.042759 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"8e360825-f56f-4e69-9e17-c9e78f295267","Type":"ContainerStarted","Data":"1bd55cf46c3b174422094837832c19795e3e984d803cbff37194ecea711b50a0"} Nov 25 10:05:14 crc kubenswrapper[4769]: I1125 10:05:14.045486 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-6cxlg" event={"ID":"f1ce4931-1e74-4600-87e2-5fb7c899376a","Type":"ContainerDied","Data":"c9061c6badddd8fd3f81eaf3a44f1b95246c968fd696a583d0f5f818b1f328fa"} Nov 25 10:05:14 crc kubenswrapper[4769]: I1125 10:05:14.045572 4769 scope.go:117] "RemoveContainer" containerID="a10b3e98aab1d5a2cb6cc35d07c90780303ef58acf939552a0a4441043f9c7a5" Nov 25 10:05:14 crc kubenswrapper[4769]: I1125 10:05:14.045829 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-6cxlg" Nov 25 10:05:14 crc kubenswrapper[4769]: I1125 10:05:14.050219 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"80bf45ba-a6bb-4e24-86c8-1234bb9f61f0","Type":"ContainerStarted","Data":"b3968728e8147943bbc534dcfc37bf20555a7d3e2c7a5724a4c5f8d4710cb16d"} Nov 25 10:05:14 crc kubenswrapper[4769]: I1125 10:05:14.067267 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=16.7645837 podStartE2EDuration="42.067244384s" podCreationTimestamp="2025-11-25 10:04:32 +0000 UTC" firstStartedPulling="2025-11-25 10:04:47.554994008 +0000 UTC m=+1236.139966321" lastFinishedPulling="2025-11-25 10:05:12.857654692 +0000 UTC m=+1261.442627005" observedRunningTime="2025-11-25 10:05:14.064286277 +0000 UTC m=+1262.649258590" watchObservedRunningTime="2025-11-25 10:05:14.067244384 +0000 UTC m=+1262.652216707" Nov 25 10:05:14 crc kubenswrapper[4769]: I1125 10:05:14.094412 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=11.287103538 podStartE2EDuration="38.09439114s" podCreationTimestamp="2025-11-25 10:04:36 +0000 UTC" firstStartedPulling="2025-11-25 10:04:46.028695977 +0000 UTC m=+1234.613668300" lastFinishedPulling="2025-11-25 10:05:12.835983589 +0000 UTC m=+1261.420955902" observedRunningTime="2025-11-25 10:05:14.094181294 +0000 UTC m=+1262.679153607" watchObservedRunningTime="2025-11-25 10:05:14.09439114 +0000 UTC m=+1262.679363453" Nov 25 10:05:14 crc kubenswrapper[4769]: I1125 10:05:14.096190 4769 scope.go:117] "RemoveContainer" containerID="d7a8919065c198dd26acf6f4cbd280241d370817f9d9052ebe1d9bde522f2663" Nov 25 10:05:14 crc kubenswrapper[4769]: I1125 10:05:14.156184 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-mq88x" podStartSLOduration=2.908070961 podStartE2EDuration="12.156162207s" podCreationTimestamp="2025-11-25 10:05:02 +0000 UTC" firstStartedPulling="2025-11-25 10:05:03.610624304 +0000 UTC m=+1252.195596617" lastFinishedPulling="2025-11-25 10:05:12.85871554 +0000 UTC m=+1261.443687863" observedRunningTime="2025-11-25 10:05:14.128060406 +0000 UTC m=+1262.713032719" watchObservedRunningTime="2025-11-25 10:05:14.156162207 +0000 UTC m=+1262.741134520" Nov 25 10:05:14 crc kubenswrapper[4769]: I1125 10:05:14.158474 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=33.339543717 podStartE2EDuration="48.158465887s" podCreationTimestamp="2025-11-25 10:04:26 +0000 UTC" firstStartedPulling="2025-11-25 10:04:44.9937477 +0000 UTC m=+1233.578720023" lastFinishedPulling="2025-11-25 10:04:59.81266988 +0000 UTC m=+1248.397642193" observedRunningTime="2025-11-25 10:05:14.154854673 +0000 UTC m=+1262.739826976" watchObservedRunningTime="2025-11-25 10:05:14.158465887 +0000 UTC m=+1262.743438200" Nov 25 10:05:14 crc kubenswrapper[4769]: I1125 10:05:14.222135 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-6cxlg"] Nov 25 10:05:14 crc kubenswrapper[4769]: I1125 10:05:14.235778 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-6cxlg"] Nov 25 10:05:14 crc kubenswrapper[4769]: I1125 10:05:14.250932 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f1ce4931-1e74-4600-87e2-5fb7c899376a" 
path="/var/lib/kubelet/pods/f1ce4931-1e74-4600-87e2-5fb7c899376a/volumes" Nov 25 10:05:15 crc kubenswrapper[4769]: I1125 10:05:15.062941 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"75a23be3-741b-4e4e-a168-4cc37f54073d","Type":"ContainerStarted","Data":"fb0c20459e760fe37e7dd417e2e2c41c270679803c66c362a1a03b343d388596"} Nov 25 10:05:15 crc kubenswrapper[4769]: I1125 10:05:15.065184 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 25 10:05:15 crc kubenswrapper[4769]: I1125 10:05:15.096804 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=16.223636238 podStartE2EDuration="45.09678284s" podCreationTimestamp="2025-11-25 10:04:30 +0000 UTC" firstStartedPulling="2025-11-25 10:04:45.801057004 +0000 UTC m=+1234.386029317" lastFinishedPulling="2025-11-25 10:05:14.674203596 +0000 UTC m=+1263.259175919" observedRunningTime="2025-11-25 10:05:15.089663945 +0000 UTC m=+1263.674636278" watchObservedRunningTime="2025-11-25 10:05:15.09678284 +0000 UTC m=+1263.681755153" Nov 25 10:05:15 crc kubenswrapper[4769]: I1125 10:05:15.804493 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Nov 25 10:05:15 crc kubenswrapper[4769]: I1125 10:05:15.976057 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.075879 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.121455 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.420105 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6c89d5d749-r65jh"] Nov 25 10:05:16 crc kubenswrapper[4769]: E1125 10:05:16.421124 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1ce4931-1e74-4600-87e2-5fb7c899376a" containerName="init" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.421143 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1ce4931-1e74-4600-87e2-5fb7c899376a" containerName="init" Nov 25 10:05:16 crc kubenswrapper[4769]: E1125 10:05:16.421161 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1ce4931-1e74-4600-87e2-5fb7c899376a" containerName="dnsmasq-dns" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.421167 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1ce4931-1e74-4600-87e2-5fb7c899376a" containerName="dnsmasq-dns" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.421381 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="f1ce4931-1e74-4600-87e2-5fb7c899376a" containerName="dnsmasq-dns" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.422626 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6c89d5d749-r65jh" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.425414 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.431017 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6c89d5d749-r65jh"] Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.438397 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a9d2094a-20f6-43f9-b875-7da52bd2bc10-dns-svc\") pod \"dnsmasq-dns-6c89d5d749-r65jh\" (UID: \"a9d2094a-20f6-43f9-b875-7da52bd2bc10\") " pod="openstack/dnsmasq-dns-6c89d5d749-r65jh" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.438495 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a9d2094a-20f6-43f9-b875-7da52bd2bc10-ovsdbserver-sb\") pod \"dnsmasq-dns-6c89d5d749-r65jh\" (UID: \"a9d2094a-20f6-43f9-b875-7da52bd2bc10\") " pod="openstack/dnsmasq-dns-6c89d5d749-r65jh" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.438552 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a9d2094a-20f6-43f9-b875-7da52bd2bc10-config\") pod \"dnsmasq-dns-6c89d5d749-r65jh\" (UID: \"a9d2094a-20f6-43f9-b875-7da52bd2bc10\") " pod="openstack/dnsmasq-dns-6c89d5d749-r65jh" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.439138 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nc57j\" (UniqueName: \"kubernetes.io/projected/a9d2094a-20f6-43f9-b875-7da52bd2bc10-kube-api-access-nc57j\") pod \"dnsmasq-dns-6c89d5d749-r65jh\" (UID: \"a9d2094a-20f6-43f9-b875-7da52bd2bc10\") " pod="openstack/dnsmasq-dns-6c89d5d749-r65jh" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.448126 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-whsjc"] Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.463776 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-whsjc" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.463949 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-whsjc"] Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.466623 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.510217 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.511531 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.544392 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nc57j\" (UniqueName: \"kubernetes.io/projected/a9d2094a-20f6-43f9-b875-7da52bd2bc10-kube-api-access-nc57j\") pod \"dnsmasq-dns-6c89d5d749-r65jh\" (UID: \"a9d2094a-20f6-43f9-b875-7da52bd2bc10\") " pod="openstack/dnsmasq-dns-6c89d5d749-r65jh" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.544513 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a9d2094a-20f6-43f9-b875-7da52bd2bc10-dns-svc\") pod \"dnsmasq-dns-6c89d5d749-r65jh\" (UID: \"a9d2094a-20f6-43f9-b875-7da52bd2bc10\") " pod="openstack/dnsmasq-dns-6c89d5d749-r65jh" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.545605 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a9d2094a-20f6-43f9-b875-7da52bd2bc10-dns-svc\") pod \"dnsmasq-dns-6c89d5d749-r65jh\" (UID: \"a9d2094a-20f6-43f9-b875-7da52bd2bc10\") " pod="openstack/dnsmasq-dns-6c89d5d749-r65jh" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.545715 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a9d2094a-20f6-43f9-b875-7da52bd2bc10-ovsdbserver-sb\") pod \"dnsmasq-dns-6c89d5d749-r65jh\" (UID: \"a9d2094a-20f6-43f9-b875-7da52bd2bc10\") " pod="openstack/dnsmasq-dns-6c89d5d749-r65jh" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.545768 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a9d2094a-20f6-43f9-b875-7da52bd2bc10-config\") pod \"dnsmasq-dns-6c89d5d749-r65jh\" (UID: \"a9d2094a-20f6-43f9-b875-7da52bd2bc10\") " pod="openstack/dnsmasq-dns-6c89d5d749-r65jh" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.547844 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a9d2094a-20f6-43f9-b875-7da52bd2bc10-ovsdbserver-sb\") pod \"dnsmasq-dns-6c89d5d749-r65jh\" (UID: \"a9d2094a-20f6-43f9-b875-7da52bd2bc10\") " pod="openstack/dnsmasq-dns-6c89d5d749-r65jh" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.549271 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a9d2094a-20f6-43f9-b875-7da52bd2bc10-config\") pod \"dnsmasq-dns-6c89d5d749-r65jh\" (UID: \"a9d2094a-20f6-43f9-b875-7da52bd2bc10\") " pod="openstack/dnsmasq-dns-6c89d5d749-r65jh" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.573868 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-nc57j\" (UniqueName: \"kubernetes.io/projected/a9d2094a-20f6-43f9-b875-7da52bd2bc10-kube-api-access-nc57j\") pod \"dnsmasq-dns-6c89d5d749-r65jh\" (UID: \"a9d2094a-20f6-43f9-b875-7da52bd2bc10\") " pod="openstack/dnsmasq-dns-6c89d5d749-r65jh" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.642981 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.647529 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/46820746-3540-4da9-9bf3-b43df2d4d66d-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-whsjc\" (UID: \"46820746-3540-4da9-9bf3-b43df2d4d66d\") " pod="openstack/ovn-controller-metrics-whsjc" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.647584 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hc2bl\" (UniqueName: \"kubernetes.io/projected/46820746-3540-4da9-9bf3-b43df2d4d66d-kube-api-access-hc2bl\") pod \"ovn-controller-metrics-whsjc\" (UID: \"46820746-3540-4da9-9bf3-b43df2d4d66d\") " pod="openstack/ovn-controller-metrics-whsjc" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.647630 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/46820746-3540-4da9-9bf3-b43df2d4d66d-ovs-rundir\") pod \"ovn-controller-metrics-whsjc\" (UID: \"46820746-3540-4da9-9bf3-b43df2d4d66d\") " pod="openstack/ovn-controller-metrics-whsjc" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.647694 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/46820746-3540-4da9-9bf3-b43df2d4d66d-combined-ca-bundle\") pod \"ovn-controller-metrics-whsjc\" (UID: \"46820746-3540-4da9-9bf3-b43df2d4d66d\") " pod="openstack/ovn-controller-metrics-whsjc" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.647817 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/46820746-3540-4da9-9bf3-b43df2d4d66d-config\") pod \"ovn-controller-metrics-whsjc\" (UID: \"46820746-3540-4da9-9bf3-b43df2d4d66d\") " pod="openstack/ovn-controller-metrics-whsjc" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.647875 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/46820746-3540-4da9-9bf3-b43df2d4d66d-ovn-rundir\") pod \"ovn-controller-metrics-whsjc\" (UID: \"46820746-3540-4da9-9bf3-b43df2d4d66d\") " pod="openstack/ovn-controller-metrics-whsjc" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.744388 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6c89d5d749-r65jh"] Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.750533 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/46820746-3540-4da9-9bf3-b43df2d4d66d-config\") pod \"ovn-controller-metrics-whsjc\" (UID: \"46820746-3540-4da9-9bf3-b43df2d4d66d\") " pod="openstack/ovn-controller-metrics-whsjc" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.750678 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/46820746-3540-4da9-9bf3-b43df2d4d66d-ovn-rundir\") pod \"ovn-controller-metrics-whsjc\" (UID: \"46820746-3540-4da9-9bf3-b43df2d4d66d\") " pod="openstack/ovn-controller-metrics-whsjc" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.751152 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/46820746-3540-4da9-9bf3-b43df2d4d66d-ovn-rundir\") pod \"ovn-controller-metrics-whsjc\" (UID: \"46820746-3540-4da9-9bf3-b43df2d4d66d\") " pod="openstack/ovn-controller-metrics-whsjc" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.750711 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/46820746-3540-4da9-9bf3-b43df2d4d66d-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-whsjc\" (UID: \"46820746-3540-4da9-9bf3-b43df2d4d66d\") " pod="openstack/ovn-controller-metrics-whsjc" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.751259 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hc2bl\" (UniqueName: \"kubernetes.io/projected/46820746-3540-4da9-9bf3-b43df2d4d66d-kube-api-access-hc2bl\") pod \"ovn-controller-metrics-whsjc\" (UID: \"46820746-3540-4da9-9bf3-b43df2d4d66d\") " pod="openstack/ovn-controller-metrics-whsjc" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.751367 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/46820746-3540-4da9-9bf3-b43df2d4d66d-config\") pod \"ovn-controller-metrics-whsjc\" (UID: \"46820746-3540-4da9-9bf3-b43df2d4d66d\") " pod="openstack/ovn-controller-metrics-whsjc" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.751552 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/46820746-3540-4da9-9bf3-b43df2d4d66d-ovs-rundir\") pod \"ovn-controller-metrics-whsjc\" (UID: \"46820746-3540-4da9-9bf3-b43df2d4d66d\") " pod="openstack/ovn-controller-metrics-whsjc" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.751294 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/46820746-3540-4da9-9bf3-b43df2d4d66d-ovs-rundir\") pod \"ovn-controller-metrics-whsjc\" (UID: \"46820746-3540-4da9-9bf3-b43df2d4d66d\") " pod="openstack/ovn-controller-metrics-whsjc" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.751775 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/46820746-3540-4da9-9bf3-b43df2d4d66d-combined-ca-bundle\") pod \"ovn-controller-metrics-whsjc\" (UID: \"46820746-3540-4da9-9bf3-b43df2d4d66d\") " pod="openstack/ovn-controller-metrics-whsjc" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.752248 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6c89d5d749-r65jh" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.757421 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/46820746-3540-4da9-9bf3-b43df2d4d66d-combined-ca-bundle\") pod \"ovn-controller-metrics-whsjc\" (UID: \"46820746-3540-4da9-9bf3-b43df2d4d66d\") " pod="openstack/ovn-controller-metrics-whsjc" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.763790 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/46820746-3540-4da9-9bf3-b43df2d4d66d-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-whsjc\" (UID: \"46820746-3540-4da9-9bf3-b43df2d4d66d\") " pod="openstack/ovn-controller-metrics-whsjc" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.770350 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hc2bl\" (UniqueName: \"kubernetes.io/projected/46820746-3540-4da9-9bf3-b43df2d4d66d-kube-api-access-hc2bl\") pod \"ovn-controller-metrics-whsjc\" (UID: \"46820746-3540-4da9-9bf3-b43df2d4d66d\") " pod="openstack/ovn-controller-metrics-whsjc" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.793646 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-whsjc" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.805726 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-698758b865-2tlt4"] Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.807974 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-2tlt4" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.811495 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.848950 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-698758b865-2tlt4"] Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.957638 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0dffefd7-a278-4acd-87f4-0b1585dfb869-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-2tlt4\" (UID: \"0dffefd7-a278-4acd-87f4-0b1585dfb869\") " pod="openstack/dnsmasq-dns-698758b865-2tlt4" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.957708 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0dffefd7-a278-4acd-87f4-0b1585dfb869-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-2tlt4\" (UID: \"0dffefd7-a278-4acd-87f4-0b1585dfb869\") " pod="openstack/dnsmasq-dns-698758b865-2tlt4" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.957762 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wg6b8\" (UniqueName: \"kubernetes.io/projected/0dffefd7-a278-4acd-87f4-0b1585dfb869-kube-api-access-wg6b8\") pod \"dnsmasq-dns-698758b865-2tlt4\" (UID: \"0dffefd7-a278-4acd-87f4-0b1585dfb869\") " pod="openstack/dnsmasq-dns-698758b865-2tlt4" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.957795 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/0dffefd7-a278-4acd-87f4-0b1585dfb869-config\") pod \"dnsmasq-dns-698758b865-2tlt4\" (UID: \"0dffefd7-a278-4acd-87f4-0b1585dfb869\") " pod="openstack/dnsmasq-dns-698758b865-2tlt4" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.957954 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0dffefd7-a278-4acd-87f4-0b1585dfb869-dns-svc\") pod \"dnsmasq-dns-698758b865-2tlt4\" (UID: \"0dffefd7-a278-4acd-87f4-0b1585dfb869\") " pod="openstack/dnsmasq-dns-698758b865-2tlt4" Nov 25 10:05:16 crc kubenswrapper[4769]: I1125 10:05:16.963650 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.062492 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0dffefd7-a278-4acd-87f4-0b1585dfb869-dns-svc\") pod \"dnsmasq-dns-698758b865-2tlt4\" (UID: \"0dffefd7-a278-4acd-87f4-0b1585dfb869\") " pod="openstack/dnsmasq-dns-698758b865-2tlt4" Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.062732 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0dffefd7-a278-4acd-87f4-0b1585dfb869-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-2tlt4\" (UID: \"0dffefd7-a278-4acd-87f4-0b1585dfb869\") " pod="openstack/dnsmasq-dns-698758b865-2tlt4" Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.062762 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0dffefd7-a278-4acd-87f4-0b1585dfb869-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-2tlt4\" (UID: \"0dffefd7-a278-4acd-87f4-0b1585dfb869\") " pod="openstack/dnsmasq-dns-698758b865-2tlt4" Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.063198 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wg6b8\" (UniqueName: \"kubernetes.io/projected/0dffefd7-a278-4acd-87f4-0b1585dfb869-kube-api-access-wg6b8\") pod \"dnsmasq-dns-698758b865-2tlt4\" (UID: \"0dffefd7-a278-4acd-87f4-0b1585dfb869\") " pod="openstack/dnsmasq-dns-698758b865-2tlt4" Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.063232 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0dffefd7-a278-4acd-87f4-0b1585dfb869-config\") pod \"dnsmasq-dns-698758b865-2tlt4\" (UID: \"0dffefd7-a278-4acd-87f4-0b1585dfb869\") " pod="openstack/dnsmasq-dns-698758b865-2tlt4" Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.064927 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0dffefd7-a278-4acd-87f4-0b1585dfb869-config\") pod \"dnsmasq-dns-698758b865-2tlt4\" (UID: \"0dffefd7-a278-4acd-87f4-0b1585dfb869\") " pod="openstack/dnsmasq-dns-698758b865-2tlt4" Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.066164 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0dffefd7-a278-4acd-87f4-0b1585dfb869-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-2tlt4\" (UID: \"0dffefd7-a278-4acd-87f4-0b1585dfb869\") " pod="openstack/dnsmasq-dns-698758b865-2tlt4" Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.066671 4769 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0dffefd7-a278-4acd-87f4-0b1585dfb869-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-2tlt4\" (UID: \"0dffefd7-a278-4acd-87f4-0b1585dfb869\") " pod="openstack/dnsmasq-dns-698758b865-2tlt4" Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.070009 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0dffefd7-a278-4acd-87f4-0b1585dfb869-dns-svc\") pod \"dnsmasq-dns-698758b865-2tlt4\" (UID: \"0dffefd7-a278-4acd-87f4-0b1585dfb869\") " pod="openstack/dnsmasq-dns-698758b865-2tlt4" Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.088545 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wg6b8\" (UniqueName: \"kubernetes.io/projected/0dffefd7-a278-4acd-87f4-0b1585dfb869-kube-api-access-wg6b8\") pod \"dnsmasq-dns-698758b865-2tlt4\" (UID: \"0dffefd7-a278-4acd-87f4-0b1585dfb869\") " pod="openstack/dnsmasq-dns-698758b865-2tlt4" Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.090890 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.095919 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.187569 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.237828 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-2tlt4" Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.248809 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.458042 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-whsjc"] Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.481372 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.483719 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.497549 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.499074 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-zlwdz" Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.499337 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.499617 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.504819 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.578358 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5a0cfe48-e8db-4d53-b3a8-68c5e724538a-config\") pod \"ovn-northd-0\" (UID: \"5a0cfe48-e8db-4d53-b3a8-68c5e724538a\") " pod="openstack/ovn-northd-0" Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.578517 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/5a0cfe48-e8db-4d53-b3a8-68c5e724538a-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"5a0cfe48-e8db-4d53-b3a8-68c5e724538a\") " pod="openstack/ovn-northd-0" Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.578549 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/5a0cfe48-e8db-4d53-b3a8-68c5e724538a-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"5a0cfe48-e8db-4d53-b3a8-68c5e724538a\") " pod="openstack/ovn-northd-0" Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.578599 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5a0cfe48-e8db-4d53-b3a8-68c5e724538a-scripts\") pod \"ovn-northd-0\" (UID: \"5a0cfe48-e8db-4d53-b3a8-68c5e724538a\") " pod="openstack/ovn-northd-0" Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.578624 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/5a0cfe48-e8db-4d53-b3a8-68c5e724538a-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"5a0cfe48-e8db-4d53-b3a8-68c5e724538a\") " pod="openstack/ovn-northd-0" Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.578667 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-csgnb\" (UniqueName: \"kubernetes.io/projected/5a0cfe48-e8db-4d53-b3a8-68c5e724538a-kube-api-access-csgnb\") pod \"ovn-northd-0\" (UID: \"5a0cfe48-e8db-4d53-b3a8-68c5e724538a\") " pod="openstack/ovn-northd-0" Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.578713 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a0cfe48-e8db-4d53-b3a8-68c5e724538a-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"5a0cfe48-e8db-4d53-b3a8-68c5e724538a\") " pod="openstack/ovn-northd-0" Nov 25 10:05:17 crc kubenswrapper[4769]: 
I1125 10:05:17.636581 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6c89d5d749-r65jh"] Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.636532 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-5b87c94878-dctn4" podUID="bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d" containerName="console" containerID="cri-o://cf7e8896f13a63165b978bb5c787e4d3db2b033bb5fbf646b2f283f9dd5c25e5" gracePeriod=15 Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.680844 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/5a0cfe48-e8db-4d53-b3a8-68c5e724538a-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"5a0cfe48-e8db-4d53-b3a8-68c5e724538a\") " pod="openstack/ovn-northd-0" Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.680884 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/5a0cfe48-e8db-4d53-b3a8-68c5e724538a-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"5a0cfe48-e8db-4d53-b3a8-68c5e724538a\") " pod="openstack/ovn-northd-0" Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.680935 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5a0cfe48-e8db-4d53-b3a8-68c5e724538a-scripts\") pod \"ovn-northd-0\" (UID: \"5a0cfe48-e8db-4d53-b3a8-68c5e724538a\") " pod="openstack/ovn-northd-0" Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.680983 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/5a0cfe48-e8db-4d53-b3a8-68c5e724538a-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"5a0cfe48-e8db-4d53-b3a8-68c5e724538a\") " pod="openstack/ovn-northd-0" Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.681031 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-csgnb\" (UniqueName: \"kubernetes.io/projected/5a0cfe48-e8db-4d53-b3a8-68c5e724538a-kube-api-access-csgnb\") pod \"ovn-northd-0\" (UID: \"5a0cfe48-e8db-4d53-b3a8-68c5e724538a\") " pod="openstack/ovn-northd-0" Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.681074 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a0cfe48-e8db-4d53-b3a8-68c5e724538a-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"5a0cfe48-e8db-4d53-b3a8-68c5e724538a\") " pod="openstack/ovn-northd-0" Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.681104 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5a0cfe48-e8db-4d53-b3a8-68c5e724538a-config\") pod \"ovn-northd-0\" (UID: \"5a0cfe48-e8db-4d53-b3a8-68c5e724538a\") " pod="openstack/ovn-northd-0" Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.683486 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5a0cfe48-e8db-4d53-b3a8-68c5e724538a-scripts\") pod \"ovn-northd-0\" (UID: \"5a0cfe48-e8db-4d53-b3a8-68c5e724538a\") " pod="openstack/ovn-northd-0" Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.684840 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5a0cfe48-e8db-4d53-b3a8-68c5e724538a-config\") 
pod \"ovn-northd-0\" (UID: \"5a0cfe48-e8db-4d53-b3a8-68c5e724538a\") " pod="openstack/ovn-northd-0" Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.684956 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/5a0cfe48-e8db-4d53-b3a8-68c5e724538a-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"5a0cfe48-e8db-4d53-b3a8-68c5e724538a\") " pod="openstack/ovn-northd-0" Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.688774 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/5a0cfe48-e8db-4d53-b3a8-68c5e724538a-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"5a0cfe48-e8db-4d53-b3a8-68c5e724538a\") " pod="openstack/ovn-northd-0" Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.690761 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a0cfe48-e8db-4d53-b3a8-68c5e724538a-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"5a0cfe48-e8db-4d53-b3a8-68c5e724538a\") " pod="openstack/ovn-northd-0" Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.692692 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/5a0cfe48-e8db-4d53-b3a8-68c5e724538a-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"5a0cfe48-e8db-4d53-b3a8-68c5e724538a\") " pod="openstack/ovn-northd-0" Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.709218 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-csgnb\" (UniqueName: \"kubernetes.io/projected/5a0cfe48-e8db-4d53-b3a8-68c5e724538a-kube-api-access-csgnb\") pod \"ovn-northd-0\" (UID: \"5a0cfe48-e8db-4d53-b3a8-68c5e724538a\") " pod="openstack/ovn-northd-0" Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.784347 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/f7987cb4-5485-438f-bc01-c69e509b81a6-etc-swift\") pod \"swift-storage-0\" (UID: \"f7987cb4-5485-438f-bc01-c69e509b81a6\") " pod="openstack/swift-storage-0" Nov 25 10:05:17 crc kubenswrapper[4769]: E1125 10:05:17.784589 4769 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 25 10:05:17 crc kubenswrapper[4769]: E1125 10:05:17.784606 4769 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 25 10:05:17 crc kubenswrapper[4769]: E1125 10:05:17.784663 4769 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/f7987cb4-5485-438f-bc01-c69e509b81a6-etc-swift podName:f7987cb4-5485-438f-bc01-c69e509b81a6 nodeName:}" failed. No retries permitted until 2025-11-25 10:05:33.784648103 +0000 UTC m=+1282.369620416 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/f7987cb4-5485-438f-bc01-c69e509b81a6-etc-swift") pod "swift-storage-0" (UID: "f7987cb4-5485-438f-bc01-c69e509b81a6") : configmap "swift-ring-files" not found Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.845912 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Nov 25 10:05:17 crc kubenswrapper[4769]: I1125 10:05:17.978296 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-698758b865-2tlt4"] Nov 25 10:05:18 crc kubenswrapper[4769]: I1125 10:05:18.115449 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-whsjc" event={"ID":"46820746-3540-4da9-9bf3-b43df2d4d66d","Type":"ContainerStarted","Data":"a011fd4da3f0439cd888abfe3a4f149b0474693cc017ac27c3cfc3c1ed481b98"} Nov 25 10:05:18 crc kubenswrapper[4769]: I1125 10:05:18.120402 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-5b87c94878-dctn4_bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d/console/0.log" Nov 25 10:05:18 crc kubenswrapper[4769]: I1125 10:05:18.120484 4769 generic.go:334] "Generic (PLEG): container finished" podID="bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d" containerID="cf7e8896f13a63165b978bb5c787e4d3db2b033bb5fbf646b2f283f9dd5c25e5" exitCode=2 Nov 25 10:05:18 crc kubenswrapper[4769]: I1125 10:05:18.120607 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5b87c94878-dctn4" event={"ID":"bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d","Type":"ContainerDied","Data":"cf7e8896f13a63165b978bb5c787e4d3db2b033bb5fbf646b2f283f9dd5c25e5"} Nov 25 10:05:18 crc kubenswrapper[4769]: I1125 10:05:18.140227 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c89d5d749-r65jh" event={"ID":"a9d2094a-20f6-43f9-b875-7da52bd2bc10","Type":"ContainerStarted","Data":"8111a45b9f2608b9d54fb789e41c6e7dc195ea71366992cbe4f5e48a8e069b93"} Nov 25 10:05:18 crc kubenswrapper[4769]: I1125 10:05:18.143157 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-2tlt4" event={"ID":"0dffefd7-a278-4acd-87f4-0b1585dfb869","Type":"ContainerStarted","Data":"1618b973e812b33e4c98117fc6a2c11d601c0c164ea9801edbe75661cdbaf891"} Nov 25 10:05:18 crc kubenswrapper[4769]: I1125 10:05:18.260739 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-5b87c94878-dctn4_bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d/console/0.log" Nov 25 10:05:18 crc kubenswrapper[4769]: I1125 10:05:18.261293 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-5b87c94878-dctn4" Nov 25 10:05:18 crc kubenswrapper[4769]: I1125 10:05:18.300493 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d-console-oauth-config\") pod \"bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d\" (UID: \"bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d\") " Nov 25 10:05:18 crc kubenswrapper[4769]: I1125 10:05:18.300597 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d-oauth-serving-cert\") pod \"bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d\" (UID: \"bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d\") " Nov 25 10:05:18 crc kubenswrapper[4769]: I1125 10:05:18.300643 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d-console-config\") pod \"bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d\" (UID: \"bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d\") " Nov 25 10:05:18 crc kubenswrapper[4769]: I1125 10:05:18.300678 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d-console-serving-cert\") pod \"bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d\" (UID: \"bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d\") " Nov 25 10:05:18 crc kubenswrapper[4769]: I1125 10:05:18.300799 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-slr5v\" (UniqueName: \"kubernetes.io/projected/bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d-kube-api-access-slr5v\") pod \"bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d\" (UID: \"bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d\") " Nov 25 10:05:18 crc kubenswrapper[4769]: I1125 10:05:18.300823 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d-service-ca\") pod \"bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d\" (UID: \"bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d\") " Nov 25 10:05:18 crc kubenswrapper[4769]: I1125 10:05:18.300858 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d-trusted-ca-bundle\") pod \"bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d\" (UID: \"bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d\") " Nov 25 10:05:18 crc kubenswrapper[4769]: I1125 10:05:18.302213 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d-console-config" (OuterVolumeSpecName: "console-config") pod "bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d" (UID: "bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:05:18 crc kubenswrapper[4769]: I1125 10:05:18.302604 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d-service-ca" (OuterVolumeSpecName: "service-ca") pod "bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d" (UID: "bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:05:18 crc kubenswrapper[4769]: I1125 10:05:18.302993 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d" (UID: "bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:05:18 crc kubenswrapper[4769]: I1125 10:05:18.303284 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d" (UID: "bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:05:18 crc kubenswrapper[4769]: I1125 10:05:18.311237 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d" (UID: "bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:05:18 crc kubenswrapper[4769]: I1125 10:05:18.314102 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d" (UID: "bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:05:18 crc kubenswrapper[4769]: I1125 10:05:18.320129 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d-kube-api-access-slr5v" (OuterVolumeSpecName: "kube-api-access-slr5v") pod "bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d" (UID: "bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d"). InnerVolumeSpecName "kube-api-access-slr5v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:05:18 crc kubenswrapper[4769]: I1125 10:05:18.326139 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-b404-account-create-58x4q"] Nov 25 10:05:18 crc kubenswrapper[4769]: E1125 10:05:18.326911 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d" containerName="console" Nov 25 10:05:18 crc kubenswrapper[4769]: I1125 10:05:18.326953 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d" containerName="console" Nov 25 10:05:18 crc kubenswrapper[4769]: I1125 10:05:18.327186 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d" containerName="console" Nov 25 10:05:18 crc kubenswrapper[4769]: I1125 10:05:18.327918 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-b404-account-create-58x4q" Nov 25 10:05:18 crc kubenswrapper[4769]: I1125 10:05:18.330919 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Nov 25 10:05:18 crc kubenswrapper[4769]: I1125 10:05:18.336085 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Nov 25 10:05:18 crc kubenswrapper[4769]: I1125 10:05:18.336123 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Nov 25 10:05:18 crc kubenswrapper[4769]: I1125 10:05:18.342940 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-b404-account-create-58x4q"] Nov 25 10:05:18 crc kubenswrapper[4769]: I1125 10:05:18.404610 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-slr5v\" (UniqueName: \"kubernetes.io/projected/bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d-kube-api-access-slr5v\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:18 crc kubenswrapper[4769]: I1125 10:05:18.404661 4769 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:18 crc kubenswrapper[4769]: I1125 10:05:18.404677 4769 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:18 crc kubenswrapper[4769]: I1125 10:05:18.404690 4769 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:18 crc kubenswrapper[4769]: I1125 10:05:18.404704 4769 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:18 crc kubenswrapper[4769]: I1125 10:05:18.404712 4769 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d-console-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:18 crc kubenswrapper[4769]: I1125 10:05:18.404721 4769 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:18 crc kubenswrapper[4769]: I1125 10:05:18.506508 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 25 10:05:18 crc kubenswrapper[4769]: I1125 10:05:18.507572 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dbee15e7-80d1-4d49-8da5-ea268221ed4f-operator-scripts\") pod \"placement-b404-account-create-58x4q\" (UID: \"dbee15e7-80d1-4d49-8da5-ea268221ed4f\") " pod="openstack/placement-b404-account-create-58x4q" Nov 25 10:05:18 crc kubenswrapper[4769]: I1125 10:05:18.507650 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k5xsf\" (UniqueName: \"kubernetes.io/projected/dbee15e7-80d1-4d49-8da5-ea268221ed4f-kube-api-access-k5xsf\") pod 
\"placement-b404-account-create-58x4q\" (UID: \"dbee15e7-80d1-4d49-8da5-ea268221ed4f\") " pod="openstack/placement-b404-account-create-58x4q" Nov 25 10:05:18 crc kubenswrapper[4769]: W1125 10:05:18.527820 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5a0cfe48_e8db_4d53_b3a8_68c5e724538a.slice/crio-f4ae4bb94ca1de83fe4d866a8ef7d8ec191002e6c16b790b9d96d4c0d332459a WatchSource:0}: Error finding container f4ae4bb94ca1de83fe4d866a8ef7d8ec191002e6c16b790b9d96d4c0d332459a: Status 404 returned error can't find the container with id f4ae4bb94ca1de83fe4d866a8ef7d8ec191002e6c16b790b9d96d4c0d332459a Nov 25 10:05:18 crc kubenswrapper[4769]: I1125 10:05:18.610525 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dbee15e7-80d1-4d49-8da5-ea268221ed4f-operator-scripts\") pod \"placement-b404-account-create-58x4q\" (UID: \"dbee15e7-80d1-4d49-8da5-ea268221ed4f\") " pod="openstack/placement-b404-account-create-58x4q" Nov 25 10:05:18 crc kubenswrapper[4769]: I1125 10:05:18.610602 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k5xsf\" (UniqueName: \"kubernetes.io/projected/dbee15e7-80d1-4d49-8da5-ea268221ed4f-kube-api-access-k5xsf\") pod \"placement-b404-account-create-58x4q\" (UID: \"dbee15e7-80d1-4d49-8da5-ea268221ed4f\") " pod="openstack/placement-b404-account-create-58x4q" Nov 25 10:05:18 crc kubenswrapper[4769]: I1125 10:05:18.611689 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dbee15e7-80d1-4d49-8da5-ea268221ed4f-operator-scripts\") pod \"placement-b404-account-create-58x4q\" (UID: \"dbee15e7-80d1-4d49-8da5-ea268221ed4f\") " pod="openstack/placement-b404-account-create-58x4q" Nov 25 10:05:18 crc kubenswrapper[4769]: I1125 10:05:18.637834 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k5xsf\" (UniqueName: \"kubernetes.io/projected/dbee15e7-80d1-4d49-8da5-ea268221ed4f-kube-api-access-k5xsf\") pod \"placement-b404-account-create-58x4q\" (UID: \"dbee15e7-80d1-4d49-8da5-ea268221ed4f\") " pod="openstack/placement-b404-account-create-58x4q" Nov 25 10:05:18 crc kubenswrapper[4769]: I1125 10:05:18.666252 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-b404-account-create-58x4q"
Nov 25 10:05:19 crc kubenswrapper[4769]: I1125 10:05:19.173709 4769 generic.go:334] "Generic (PLEG): container finished" podID="4857aaf8-4133-4c20-bc8c-d4d195091176" containerID="2a413543e21b9469c261c3a61cde7924060f4282a41e7a3c3da17759fd57caf4" exitCode=0
Nov 25 10:05:19 crc kubenswrapper[4769]: I1125 10:05:19.173832 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"4857aaf8-4133-4c20-bc8c-d4d195091176","Type":"ContainerDied","Data":"2a413543e21b9469c261c3a61cde7924060f4282a41e7a3c3da17759fd57caf4"}
Nov 25 10:05:19 crc kubenswrapper[4769]: I1125 10:05:19.183556 4769 generic.go:334] "Generic (PLEG): container finished" podID="981369ae-93f2-4c25-bdea-d3d89686b0d5" containerID="e81a4a46bd061521e1409da211c03d0f8532e14a5cf15cf52d40cc95b4cda39d" exitCode=0
Nov 25 10:05:19 crc kubenswrapper[4769]: I1125 10:05:19.183716 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"981369ae-93f2-4c25-bdea-d3d89686b0d5","Type":"ContainerDied","Data":"e81a4a46bd061521e1409da211c03d0f8532e14a5cf15cf52d40cc95b4cda39d"}
Nov 25 10:05:19 crc kubenswrapper[4769]: I1125 10:05:19.186786 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"5a0cfe48-e8db-4d53-b3a8-68c5e724538a","Type":"ContainerStarted","Data":"f4ae4bb94ca1de83fe4d866a8ef7d8ec191002e6c16b790b9d96d4c0d332459a"}
Nov 25 10:05:19 crc kubenswrapper[4769]: I1125 10:05:19.189185 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-whsjc" event={"ID":"46820746-3540-4da9-9bf3-b43df2d4d66d","Type":"ContainerStarted","Data":"503469cc986e7a367f11cc1599a24d0fe1e5fecda94100009bb1b8168e312845"}
Nov 25 10:05:19 crc kubenswrapper[4769]: I1125 10:05:19.202673 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-5b87c94878-dctn4_bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d/console/0.log"
Nov 25 10:05:19 crc kubenswrapper[4769]: I1125 10:05:19.202858 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5b87c94878-dctn4" event={"ID":"bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d","Type":"ContainerDied","Data":"12b1401610ff3b450bfa7ec5f8a76f7090d14954e9e2bbd327fd278eb46b14c2"}
Nov 25 10:05:19 crc kubenswrapper[4769]: I1125 10:05:19.202913 4769 scope.go:117] "RemoveContainer" containerID="cf7e8896f13a63165b978bb5c787e4d3db2b033bb5fbf646b2f283f9dd5c25e5"
Nov 25 10:05:19 crc kubenswrapper[4769]: I1125 10:05:19.202869 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-5b87c94878-dctn4"
Nov 25 10:05:19 crc kubenswrapper[4769]: I1125 10:05:19.224693 4769 generic.go:334] "Generic (PLEG): container finished" podID="a9d2094a-20f6-43f9-b875-7da52bd2bc10" containerID="fe46a6c00316ba56fae75ff5c2a8b8fcd98315ec2bfdf503860f67732a9f703e" exitCode=0
Nov 25 10:05:19 crc kubenswrapper[4769]: I1125 10:05:19.225726 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c89d5d749-r65jh" event={"ID":"a9d2094a-20f6-43f9-b875-7da52bd2bc10","Type":"ContainerDied","Data":"fe46a6c00316ba56fae75ff5c2a8b8fcd98315ec2bfdf503860f67732a9f703e"}
Nov 25 10:05:19 crc kubenswrapper[4769]: I1125 10:05:19.237631 4769 generic.go:334] "Generic (PLEG): container finished" podID="0dffefd7-a278-4acd-87f4-0b1585dfb869" containerID="b23b88a6ed53c4c1070c204f3c577cb48da6a6379782c8d55f39b3e6c9e206a0" exitCode=0
Nov 25 10:05:19 crc kubenswrapper[4769]: I1125 10:05:19.237752 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-2tlt4" event={"ID":"0dffefd7-a278-4acd-87f4-0b1585dfb869","Type":"ContainerDied","Data":"b23b88a6ed53c4c1070c204f3c577cb48da6a6379782c8d55f39b3e6c9e206a0"}
Nov 25 10:05:19 crc kubenswrapper[4769]: I1125 10:05:19.239297 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-b404-account-create-58x4q"]
Nov 25 10:05:19 crc kubenswrapper[4769]: I1125 10:05:19.270419 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-whsjc" podStartSLOduration=3.270391418 podStartE2EDuration="3.270391418s" podCreationTimestamp="2025-11-25 10:05:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:05:19.25895479 +0000 UTC m=+1267.843927113" watchObservedRunningTime="2025-11-25 10:05:19.270391418 +0000 UTC m=+1267.855363731"
Nov 25 10:05:19 crc kubenswrapper[4769]: I1125 10:05:19.387265 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-5b87c94878-dctn4"]
Nov 25 10:05:19 crc kubenswrapper[4769]: I1125 10:05:19.398462 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-5b87c94878-dctn4"]
Nov 25 10:05:20 crc kubenswrapper[4769]: I1125 10:05:20.052325 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6c89d5d749-r65jh"
Nov 25 10:05:20 crc kubenswrapper[4769]: I1125 10:05:20.164017 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a9d2094a-20f6-43f9-b875-7da52bd2bc10-ovsdbserver-sb\") pod \"a9d2094a-20f6-43f9-b875-7da52bd2bc10\" (UID: \"a9d2094a-20f6-43f9-b875-7da52bd2bc10\") "
Nov 25 10:05:20 crc kubenswrapper[4769]: I1125 10:05:20.164223 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a9d2094a-20f6-43f9-b875-7da52bd2bc10-dns-svc\") pod \"a9d2094a-20f6-43f9-b875-7da52bd2bc10\" (UID: \"a9d2094a-20f6-43f9-b875-7da52bd2bc10\") "
Nov 25 10:05:20 crc kubenswrapper[4769]: I1125 10:05:20.164265 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nc57j\" (UniqueName: \"kubernetes.io/projected/a9d2094a-20f6-43f9-b875-7da52bd2bc10-kube-api-access-nc57j\") pod \"a9d2094a-20f6-43f9-b875-7da52bd2bc10\" (UID: \"a9d2094a-20f6-43f9-b875-7da52bd2bc10\") "
Nov 25 10:05:20 crc kubenswrapper[4769]: I1125 10:05:20.164516 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a9d2094a-20f6-43f9-b875-7da52bd2bc10-config\") pod \"a9d2094a-20f6-43f9-b875-7da52bd2bc10\" (UID: \"a9d2094a-20f6-43f9-b875-7da52bd2bc10\") "
Nov 25 10:05:20 crc kubenswrapper[4769]: I1125 10:05:20.176424 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a9d2094a-20f6-43f9-b875-7da52bd2bc10-kube-api-access-nc57j" (OuterVolumeSpecName: "kube-api-access-nc57j") pod "a9d2094a-20f6-43f9-b875-7da52bd2bc10" (UID: "a9d2094a-20f6-43f9-b875-7da52bd2bc10"). InnerVolumeSpecName "kube-api-access-nc57j". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:05:20 crc kubenswrapper[4769]: I1125 10:05:20.219536 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a9d2094a-20f6-43f9-b875-7da52bd2bc10-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "a9d2094a-20f6-43f9-b875-7da52bd2bc10" (UID: "a9d2094a-20f6-43f9-b875-7da52bd2bc10"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:05:20 crc kubenswrapper[4769]: I1125 10:05:20.267736 4769 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a9d2094a-20f6-43f9-b875-7da52bd2bc10-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 25 10:05:20 crc kubenswrapper[4769]: I1125 10:05:20.268055 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nc57j\" (UniqueName: \"kubernetes.io/projected/a9d2094a-20f6-43f9-b875-7da52bd2bc10-kube-api-access-nc57j\") on node \"crc\" DevicePath \"\""
Nov 25 10:05:20 crc kubenswrapper[4769]: I1125 10:05:20.296950 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6c89d5d749-r65jh"
Nov 25 10:05:20 crc kubenswrapper[4769]: I1125 10:05:20.297237 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d" path="/var/lib/kubelet/pods/bb18d6f0-fbf5-4f14-8677-2cbf4a27ac4d/volumes"
Nov 25 10:05:20 crc kubenswrapper[4769]: I1125 10:05:20.312435 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c89d5d749-r65jh" event={"ID":"a9d2094a-20f6-43f9-b875-7da52bd2bc10","Type":"ContainerDied","Data":"8111a45b9f2608b9d54fb789e41c6e7dc195ea71366992cbe4f5e48a8e069b93"}
Nov 25 10:05:20 crc kubenswrapper[4769]: I1125 10:05:20.312492 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-b404-account-create-58x4q" event={"ID":"dbee15e7-80d1-4d49-8da5-ea268221ed4f","Type":"ContainerStarted","Data":"313683b7edb50c99abfc8bac82e2ba0249974b76615fa00b62ad58214638f4b2"}
Nov 25 10:05:20 crc kubenswrapper[4769]: I1125 10:05:20.312520 4769 scope.go:117] "RemoveContainer" containerID="fe46a6c00316ba56fae75ff5c2a8b8fcd98315ec2bfdf503860f67732a9f703e"
Nov 25 10:05:20 crc kubenswrapper[4769]: I1125 10:05:20.327689 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-kgxrr"]
Nov 25 10:05:20 crc kubenswrapper[4769]: E1125 10:05:20.328360 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9d2094a-20f6-43f9-b875-7da52bd2bc10" containerName="init"
Nov 25 10:05:20 crc kubenswrapper[4769]: I1125 10:05:20.328381 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9d2094a-20f6-43f9-b875-7da52bd2bc10" containerName="init"
Nov 25 10:05:20 crc kubenswrapper[4769]: I1125 10:05:20.328592 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="a9d2094a-20f6-43f9-b875-7da52bd2bc10" containerName="init"
Nov 25 10:05:20 crc kubenswrapper[4769]: I1125 10:05:20.329425 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-kgxrr"
Nov 25 10:05:20 crc kubenswrapper[4769]: I1125 10:05:20.351834 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-kgxrr"]
Nov 25 10:05:20 crc kubenswrapper[4769]: I1125 10:05:20.372649 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zbvpq\" (UniqueName: \"kubernetes.io/projected/53cae36d-5253-41e8-b6a8-b716251729b6-kube-api-access-zbvpq\") pod \"mysqld-exporter-openstack-db-create-kgxrr\" (UID: \"53cae36d-5253-41e8-b6a8-b716251729b6\") " pod="openstack/mysqld-exporter-openstack-db-create-kgxrr"
Nov 25 10:05:20 crc kubenswrapper[4769]: I1125 10:05:20.372862 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/53cae36d-5253-41e8-b6a8-b716251729b6-operator-scripts\") pod \"mysqld-exporter-openstack-db-create-kgxrr\" (UID: \"53cae36d-5253-41e8-b6a8-b716251729b6\") " pod="openstack/mysqld-exporter-openstack-db-create-kgxrr"
Nov 25 10:05:20 crc kubenswrapper[4769]: I1125 10:05:20.477862 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zbvpq\" (UniqueName: \"kubernetes.io/projected/53cae36d-5253-41e8-b6a8-b716251729b6-kube-api-access-zbvpq\") pod \"mysqld-exporter-openstack-db-create-kgxrr\" (UID: \"53cae36d-5253-41e8-b6a8-b716251729b6\") " pod="openstack/mysqld-exporter-openstack-db-create-kgxrr"
Nov 25 10:05:20 crc kubenswrapper[4769]: I1125 10:05:20.478289 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/53cae36d-5253-41e8-b6a8-b716251729b6-operator-scripts\") pod \"mysqld-exporter-openstack-db-create-kgxrr\" (UID: \"53cae36d-5253-41e8-b6a8-b716251729b6\") " pod="openstack/mysqld-exporter-openstack-db-create-kgxrr"
Nov 25 10:05:20 crc kubenswrapper[4769]: I1125 10:05:20.479096 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/53cae36d-5253-41e8-b6a8-b716251729b6-operator-scripts\") pod \"mysqld-exporter-openstack-db-create-kgxrr\" (UID: \"53cae36d-5253-41e8-b6a8-b716251729b6\") " pod="openstack/mysqld-exporter-openstack-db-create-kgxrr"
Nov 25 10:05:20 crc kubenswrapper[4769]: I1125 10:05:20.500338 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a9d2094a-20f6-43f9-b875-7da52bd2bc10-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a9d2094a-20f6-43f9-b875-7da52bd2bc10" (UID: "a9d2094a-20f6-43f9-b875-7da52bd2bc10"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:05:20 crc kubenswrapper[4769]: I1125 10:05:20.501295 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zbvpq\" (UniqueName: \"kubernetes.io/projected/53cae36d-5253-41e8-b6a8-b716251729b6-kube-api-access-zbvpq\") pod \"mysqld-exporter-openstack-db-create-kgxrr\" (UID: \"53cae36d-5253-41e8-b6a8-b716251729b6\") " pod="openstack/mysqld-exporter-openstack-db-create-kgxrr"
Nov 25 10:05:20 crc kubenswrapper[4769]: I1125 10:05:20.507639 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a9d2094a-20f6-43f9-b875-7da52bd2bc10-config" (OuterVolumeSpecName: "config") pod "a9d2094a-20f6-43f9-b875-7da52bd2bc10" (UID: "a9d2094a-20f6-43f9-b875-7da52bd2bc10"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:05:20 crc kubenswrapper[4769]: I1125 10:05:20.564044 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-0af1-account-create-dkkq6"]
Nov 25 10:05:20 crc kubenswrapper[4769]: I1125 10:05:20.566070 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0af1-account-create-dkkq6"
Nov 25 10:05:20 crc kubenswrapper[4769]: I1125 10:05:20.568701 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-openstack-db-secret"
Nov 25 10:05:20 crc kubenswrapper[4769]: I1125 10:05:20.576152 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0af1-account-create-dkkq6"]
Nov 25 10:05:20 crc kubenswrapper[4769]: I1125 10:05:20.580676 4769 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a9d2094a-20f6-43f9-b875-7da52bd2bc10-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 25 10:05:20 crc kubenswrapper[4769]: I1125 10:05:20.580705 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a9d2094a-20f6-43f9-b875-7da52bd2bc10-config\") on node \"crc\" DevicePath \"\""
Nov 25 10:05:20 crc kubenswrapper[4769]: I1125 10:05:20.595558 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0"
Nov 25 10:05:20 crc kubenswrapper[4769]: I1125 10:05:20.661466 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-kgxrr"
Nov 25 10:05:20 crc kubenswrapper[4769]: I1125 10:05:20.673491 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0"
Nov 25 10:05:20 crc kubenswrapper[4769]: I1125 10:05:20.691817 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m5t7q\" (UniqueName: \"kubernetes.io/projected/d226b614-be9f-4d60-a7b4-f6ce81a8753f-kube-api-access-m5t7q\") pod \"mysqld-exporter-0af1-account-create-dkkq6\" (UID: \"d226b614-be9f-4d60-a7b4-f6ce81a8753f\") " pod="openstack/mysqld-exporter-0af1-account-create-dkkq6"
Nov 25 10:05:20 crc kubenswrapper[4769]: I1125 10:05:20.692044 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d226b614-be9f-4d60-a7b4-f6ce81a8753f-operator-scripts\") pod \"mysqld-exporter-0af1-account-create-dkkq6\" (UID: \"d226b614-be9f-4d60-a7b4-f6ce81a8753f\") " pod="openstack/mysqld-exporter-0af1-account-create-dkkq6"
Nov 25 10:05:20 crc kubenswrapper[4769]: I1125 10:05:20.776776 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6c89d5d749-r65jh"]
Nov 25 10:05:20 crc kubenswrapper[4769]: I1125 10:05:20.788910 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6c89d5d749-r65jh"]
Nov 25 10:05:20 crc kubenswrapper[4769]: I1125 10:05:20.794766 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m5t7q\" (UniqueName: \"kubernetes.io/projected/d226b614-be9f-4d60-a7b4-f6ce81a8753f-kube-api-access-m5t7q\") pod \"mysqld-exporter-0af1-account-create-dkkq6\" (UID: \"d226b614-be9f-4d60-a7b4-f6ce81a8753f\") " pod="openstack/mysqld-exporter-0af1-account-create-dkkq6"
Nov 25 10:05:20 crc kubenswrapper[4769]: I1125 10:05:20.794892 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d226b614-be9f-4d60-a7b4-f6ce81a8753f-operator-scripts\") pod \"mysqld-exporter-0af1-account-create-dkkq6\" (UID: \"d226b614-be9f-4d60-a7b4-f6ce81a8753f\") " pod="openstack/mysqld-exporter-0af1-account-create-dkkq6"
Nov 25 10:05:20 crc kubenswrapper[4769]: I1125 10:05:20.797739 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d226b614-be9f-4d60-a7b4-f6ce81a8753f-operator-scripts\") pod \"mysqld-exporter-0af1-account-create-dkkq6\" (UID: \"d226b614-be9f-4d60-a7b4-f6ce81a8753f\") " pod="openstack/mysqld-exporter-0af1-account-create-dkkq6"
Nov 25 10:05:20 crc kubenswrapper[4769]: I1125 10:05:20.822145 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m5t7q\" (UniqueName: \"kubernetes.io/projected/d226b614-be9f-4d60-a7b4-f6ce81a8753f-kube-api-access-m5t7q\") pod \"mysqld-exporter-0af1-account-create-dkkq6\" (UID: \"d226b614-be9f-4d60-a7b4-f6ce81a8753f\") " pod="openstack/mysqld-exporter-0af1-account-create-dkkq6"
Nov 25 10:05:20 crc kubenswrapper[4769]: I1125 10:05:20.857296 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0"
Nov 25 10:05:21 crc kubenswrapper[4769]: I1125 10:05:20.993733 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0af1-account-create-dkkq6"
Nov 25 10:05:21 crc kubenswrapper[4769]: I1125 10:05:21.314185 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"981369ae-93f2-4c25-bdea-d3d89686b0d5","Type":"ContainerStarted","Data":"e89816873ee41f43364f9d484d1ce5af7f6013fdc49031678da977a414617fba"}
Nov 25 10:05:21 crc kubenswrapper[4769]: I1125 10:05:21.314525 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0"
Nov 25 10:05:21 crc kubenswrapper[4769]: I1125 10:05:21.315847 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"5a0cfe48-e8db-4d53-b3a8-68c5e724538a","Type":"ContainerStarted","Data":"814cea9631923ced95818955da6b77181d8466fd6298270e6e65851ce406268d"}
Nov 25 10:05:21 crc kubenswrapper[4769]: I1125 10:05:21.315883 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"5a0cfe48-e8db-4d53-b3a8-68c5e724538a","Type":"ContainerStarted","Data":"358f241cd00d8d7490f93cd5e9c8961c4f7aa674d147d82e4163ddff53f3b740"}
Nov 25 10:05:21 crc kubenswrapper[4769]: I1125 10:05:21.316527 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0"
Nov 25 10:05:21 crc kubenswrapper[4769]: I1125 10:05:21.322533 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-2tlt4" event={"ID":"0dffefd7-a278-4acd-87f4-0b1585dfb869","Type":"ContainerStarted","Data":"b3f62375ae317683f5b0dd0fa4c637d50ca7f4926d68f49b5bdb5aa536893a5a"}
Nov 25 10:05:21 crc kubenswrapper[4769]: I1125 10:05:21.322876 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-698758b865-2tlt4"
Nov 25 10:05:21 crc kubenswrapper[4769]: I1125 10:05:21.325463 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"4857aaf8-4133-4c20-bc8c-d4d195091176","Type":"ContainerStarted","Data":"1d15ed991d65b7745c50096bb73a804672d4002840b23053fddc04ecdab15ac0"}
Nov 25 10:05:21 crc kubenswrapper[4769]: I1125 10:05:21.326733 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0"
Nov 25 10:05:21 crc kubenswrapper[4769]: I1125 10:05:21.328639 4769 generic.go:334] "Generic (PLEG): container finished" podID="dbee15e7-80d1-4d49-8da5-ea268221ed4f" containerID="5637982e602a3100947e08914ec4192f6b6acced023664555932a84db33bcc62" exitCode=0
Nov 25 10:05:21 crc kubenswrapper[4769]: I1125 10:05:21.329404 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-b404-account-create-58x4q" event={"ID":"dbee15e7-80d1-4d49-8da5-ea268221ed4f","Type":"ContainerDied","Data":"5637982e602a3100947e08914ec4192f6b6acced023664555932a84db33bcc62"}
Nov 25 10:05:21 crc kubenswrapper[4769]: I1125 10:05:21.364999 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=39.461477195 podStartE2EDuration="58.364976515s" podCreationTimestamp="2025-11-25 10:04:23 +0000 UTC" firstStartedPulling="2025-11-25 10:04:25.399916489 +0000 UTC m=+1213.984888802" lastFinishedPulling="2025-11-25 10:04:44.303415809 +0000 UTC m=+1232.888388122" observedRunningTime="2025-11-25 10:05:21.344061961 +0000 UTC m=+1269.929034294" watchObservedRunningTime="2025-11-25 10:05:21.364976515 +0000 UTC m=+1269.949948828"
Nov 25 10:05:21 crc kubenswrapper[4769]: I1125 10:05:21.394549 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=39.957340377 podStartE2EDuration="58.394503904s" podCreationTimestamp="2025-11-25 10:04:23 +0000 UTC" firstStartedPulling="2025-11-25 10:04:25.719241617 +0000 UTC m=+1214.304213920" lastFinishedPulling="2025-11-25 10:04:44.156405134 +0000 UTC m=+1232.741377447" observedRunningTime="2025-11-25 10:05:21.387172253 +0000 UTC m=+1269.972144576" watchObservedRunningTime="2025-11-25 10:05:21.394503904 +0000 UTC m=+1269.979476217"
Nov 25 10:05:21 crc kubenswrapper[4769]: I1125 10:05:21.427047 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=3.048533686 podStartE2EDuration="4.42701695s" podCreationTimestamp="2025-11-25 10:05:17 +0000 UTC" firstStartedPulling="2025-11-25 10:05:18.532284735 +0000 UTC m=+1267.117257048" lastFinishedPulling="2025-11-25 10:05:19.910767999 +0000 UTC m=+1268.495740312" observedRunningTime="2025-11-25 10:05:21.408875578 +0000 UTC m=+1269.993847891" watchObservedRunningTime="2025-11-25 10:05:21.42701695 +0000 UTC m=+1270.011989263"
Nov 25 10:05:21 crc kubenswrapper[4769]: I1125 10:05:21.439790 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-698758b865-2tlt4" podStartSLOduration=5.439765381 podStartE2EDuration="5.439765381s" podCreationTimestamp="2025-11-25 10:05:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:05:21.439011402 +0000 UTC m=+1270.023983725" watchObservedRunningTime="2025-11-25 10:05:21.439765381 +0000 UTC m=+1270.024737694"
Nov 25 10:05:22 crc kubenswrapper[4769]: I1125 10:05:22.164064 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0af1-account-create-dkkq6"]
Nov 25 10:05:22 crc kubenswrapper[4769]: W1125 10:05:22.171445 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd226b614_be9f_4d60_a7b4_f6ce81a8753f.slice/crio-d08e7396897035bf69fcd535957190b18c7495ac4167c9bdb0af1a49a278c719 WatchSource:0}: Error finding container d08e7396897035bf69fcd535957190b18c7495ac4167c9bdb0af1a49a278c719: Status 404 returned error can't find the container with id d08e7396897035bf69fcd535957190b18c7495ac4167c9bdb0af1a49a278c719
Nov 25 10:05:22 crc kubenswrapper[4769]: I1125 10:05:22.174940 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-kgxrr"]
Nov 25 10:05:22 crc kubenswrapper[4769]: I1125 10:05:22.270330 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a9d2094a-20f6-43f9-b875-7da52bd2bc10" path="/var/lib/kubelet/pods/a9d2094a-20f6-43f9-b875-7da52bd2bc10/volumes"
Nov 25 10:05:22 crc kubenswrapper[4769]: I1125 10:05:22.341562 4769 generic.go:334] "Generic (PLEG): container finished" podID="6e5767ed-9248-4df0-945f-817154feb33c" containerID="d2adae65ece1d6de11e84374d05a398eccc3c53002ca070a454c688c3c557f0a" exitCode=0
Nov 25 10:05:22 crc kubenswrapper[4769]: I1125 10:05:22.341627 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-mq88x" event={"ID":"6e5767ed-9248-4df0-945f-817154feb33c","Type":"ContainerDied","Data":"d2adae65ece1d6de11e84374d05a398eccc3c53002ca070a454c688c3c557f0a"}
Nov 25 10:05:22 crc kubenswrapper[4769]: I1125 10:05:22.344702 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-kgxrr" event={"ID":"53cae36d-5253-41e8-b6a8-b716251729b6","Type":"ContainerStarted","Data":"b6b338206267e0c096f784948afe7b49c904c3227f48911648c7264f349f83d3"}
Nov 25 10:05:22 crc kubenswrapper[4769]: I1125 10:05:22.347576 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0af1-account-create-dkkq6" event={"ID":"d226b614-be9f-4d60-a7b4-f6ce81a8753f","Type":"ContainerStarted","Data":"d08e7396897035bf69fcd535957190b18c7495ac4167c9bdb0af1a49a278c719"}
Nov 25 10:05:23 crc kubenswrapper[4769]: I1125 10:05:23.366373 4769 generic.go:334] "Generic (PLEG): container finished" podID="53cae36d-5253-41e8-b6a8-b716251729b6" containerID="511333d30b3ff933fa85b13db5ff01f2938bfa2ad4f8a9b464f104c151252bb1" exitCode=0
Nov 25 10:05:23 crc kubenswrapper[4769]: I1125 10:05:23.366445 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-kgxrr" event={"ID":"53cae36d-5253-41e8-b6a8-b716251729b6","Type":"ContainerDied","Data":"511333d30b3ff933fa85b13db5ff01f2938bfa2ad4f8a9b464f104c151252bb1"}
Nov 25 10:05:23 crc kubenswrapper[4769]: I1125 10:05:23.373540 4769 generic.go:334] "Generic (PLEG): container finished" podID="d226b614-be9f-4d60-a7b4-f6ce81a8753f" containerID="0c800b2a9cf0548231ba249b6b44ff268432ab40918388b26d163be6f959e5a8" exitCode=0
Nov 25 10:05:23 crc kubenswrapper[4769]: I1125 10:05:23.373661 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0af1-account-create-dkkq6" event={"ID":"d226b614-be9f-4d60-a7b4-f6ce81a8753f","Type":"ContainerDied","Data":"0c800b2a9cf0548231ba249b6b44ff268432ab40918388b26d163be6f959e5a8"}
Nov 25 10:05:23 crc kubenswrapper[4769]: I1125 10:05:23.621297 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-2g226"]
Nov 25 10:05:23 crc kubenswrapper[4769]: I1125 10:05:23.623545 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-2g226"
Nov 25 10:05:23 crc kubenswrapper[4769]: I1125 10:05:23.646127 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-2g226"]
Nov 25 10:05:23 crc kubenswrapper[4769]: I1125 10:05:23.721726 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-966e-account-create-cd5fr"]
Nov 25 10:05:23 crc kubenswrapper[4769]: I1125 10:05:23.723503 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-966e-account-create-cd5fr"
Nov 25 10:05:23 crc kubenswrapper[4769]: I1125 10:05:23.727059 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret"
Nov 25 10:05:23 crc kubenswrapper[4769]: I1125 10:05:23.751739 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-966e-account-create-cd5fr"]
Nov 25 10:05:23 crc kubenswrapper[4769]: I1125 10:05:23.795426 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8bpcn\" (UniqueName: \"kubernetes.io/projected/b4286152-9aa2-4d10-94b0-0676b434dc03-kube-api-access-8bpcn\") pod \"glance-db-create-2g226\" (UID: \"b4286152-9aa2-4d10-94b0-0676b434dc03\") " pod="openstack/glance-db-create-2g226"
Nov 25 10:05:23 crc kubenswrapper[4769]: I1125 10:05:23.795586 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b4286152-9aa2-4d10-94b0-0676b434dc03-operator-scripts\") pod \"glance-db-create-2g226\" (UID: \"b4286152-9aa2-4d10-94b0-0676b434dc03\") " pod="openstack/glance-db-create-2g226"
Nov 25 10:05:23 crc kubenswrapper[4769]: I1125 10:05:23.897943 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e2f8ec92-08ca-40fd-b130-33cbd71ab39b-operator-scripts\") pod \"glance-966e-account-create-cd5fr\" (UID: \"e2f8ec92-08ca-40fd-b130-33cbd71ab39b\") " pod="openstack/glance-966e-account-create-cd5fr"
Nov 25 10:05:23 crc kubenswrapper[4769]: I1125 10:05:23.898106 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8bpcn\" (UniqueName: \"kubernetes.io/projected/b4286152-9aa2-4d10-94b0-0676b434dc03-kube-api-access-8bpcn\") pod \"glance-db-create-2g226\" (UID: \"b4286152-9aa2-4d10-94b0-0676b434dc03\") " pod="openstack/glance-db-create-2g226"
Nov 25 10:05:23 crc kubenswrapper[4769]: I1125 10:05:23.898148 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vgtjg\" (UniqueName: \"kubernetes.io/projected/e2f8ec92-08ca-40fd-b130-33cbd71ab39b-kube-api-access-vgtjg\") pod \"glance-966e-account-create-cd5fr\" (UID: \"e2f8ec92-08ca-40fd-b130-33cbd71ab39b\") " pod="openstack/glance-966e-account-create-cd5fr"
Nov 25 10:05:23 crc kubenswrapper[4769]: I1125 10:05:23.898179 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b4286152-9aa2-4d10-94b0-0676b434dc03-operator-scripts\") pod \"glance-db-create-2g226\" (UID: \"b4286152-9aa2-4d10-94b0-0676b434dc03\") " pod="openstack/glance-db-create-2g226"
Nov 25 10:05:23 crc kubenswrapper[4769]: I1125 10:05:23.899098 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b4286152-9aa2-4d10-94b0-0676b434dc03-operator-scripts\") pod \"glance-db-create-2g226\" (UID: \"b4286152-9aa2-4d10-94b0-0676b434dc03\") " pod="openstack/glance-db-create-2g226"
Nov 25 10:05:23 crc kubenswrapper[4769]: I1125 10:05:23.933382 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8bpcn\" (UniqueName: \"kubernetes.io/projected/b4286152-9aa2-4d10-94b0-0676b434dc03-kube-api-access-8bpcn\") pod \"glance-db-create-2g226\" (UID: \"b4286152-9aa2-4d10-94b0-0676b434dc03\") " pod="openstack/glance-db-create-2g226"
Nov 25 10:05:23 crc kubenswrapper[4769]: I1125 10:05:23.958973 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-2g226"
Nov 25 10:05:24 crc kubenswrapper[4769]: I1125 10:05:24.000296 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vgtjg\" (UniqueName: \"kubernetes.io/projected/e2f8ec92-08ca-40fd-b130-33cbd71ab39b-kube-api-access-vgtjg\") pod \"glance-966e-account-create-cd5fr\" (UID: \"e2f8ec92-08ca-40fd-b130-33cbd71ab39b\") " pod="openstack/glance-966e-account-create-cd5fr"
Nov 25 10:05:24 crc kubenswrapper[4769]: I1125 10:05:24.000457 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e2f8ec92-08ca-40fd-b130-33cbd71ab39b-operator-scripts\") pod \"glance-966e-account-create-cd5fr\" (UID: \"e2f8ec92-08ca-40fd-b130-33cbd71ab39b\") " pod="openstack/glance-966e-account-create-cd5fr"
Nov 25 10:05:24 crc kubenswrapper[4769]: I1125 10:05:24.001203 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e2f8ec92-08ca-40fd-b130-33cbd71ab39b-operator-scripts\") pod \"glance-966e-account-create-cd5fr\" (UID: \"e2f8ec92-08ca-40fd-b130-33cbd71ab39b\") " pod="openstack/glance-966e-account-create-cd5fr"
Nov 25 10:05:24 crc kubenswrapper[4769]: I1125 10:05:24.024695 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vgtjg\" (UniqueName: \"kubernetes.io/projected/e2f8ec92-08ca-40fd-b130-33cbd71ab39b-kube-api-access-vgtjg\") pod \"glance-966e-account-create-cd5fr\" (UID: \"e2f8ec92-08ca-40fd-b130-33cbd71ab39b\") " pod="openstack/glance-966e-account-create-cd5fr"
Nov 25 10:05:24 crc kubenswrapper[4769]: I1125 10:05:24.044932 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-966e-account-create-cd5fr"
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.245849 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-b404-account-create-58x4q"
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.291944 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0af1-account-create-dkkq6"
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.293925 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-mq88x"
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.301775 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-kgxrr"
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.363658 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dbee15e7-80d1-4d49-8da5-ea268221ed4f-operator-scripts\") pod \"dbee15e7-80d1-4d49-8da5-ea268221ed4f\" (UID: \"dbee15e7-80d1-4d49-8da5-ea268221ed4f\") "
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.363866 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k5xsf\" (UniqueName: \"kubernetes.io/projected/dbee15e7-80d1-4d49-8da5-ea268221ed4f-kube-api-access-k5xsf\") pod \"dbee15e7-80d1-4d49-8da5-ea268221ed4f\" (UID: \"dbee15e7-80d1-4d49-8da5-ea268221ed4f\") "
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.364502 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dbee15e7-80d1-4d49-8da5-ea268221ed4f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "dbee15e7-80d1-4d49-8da5-ea268221ed4f" (UID: "dbee15e7-80d1-4d49-8da5-ea268221ed4f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.364819 4769 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dbee15e7-80d1-4d49-8da5-ea268221ed4f-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.369608 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dbee15e7-80d1-4d49-8da5-ea268221ed4f-kube-api-access-k5xsf" (OuterVolumeSpecName: "kube-api-access-k5xsf") pod "dbee15e7-80d1-4d49-8da5-ea268221ed4f" (UID: "dbee15e7-80d1-4d49-8da5-ea268221ed4f"). InnerVolumeSpecName "kube-api-access-k5xsf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.416953 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-b404-account-create-58x4q" event={"ID":"dbee15e7-80d1-4d49-8da5-ea268221ed4f","Type":"ContainerDied","Data":"313683b7edb50c99abfc8bac82e2ba0249974b76615fa00b62ad58214638f4b2"}
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.417014 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-b404-account-create-58x4q"
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.417026 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="313683b7edb50c99abfc8bac82e2ba0249974b76615fa00b62ad58214638f4b2"
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.418675 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0af1-account-create-dkkq6"
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.418671 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0af1-account-create-dkkq6" event={"ID":"d226b614-be9f-4d60-a7b4-f6ce81a8753f","Type":"ContainerDied","Data":"d08e7396897035bf69fcd535957190b18c7495ac4167c9bdb0af1a49a278c719"}
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.418799 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d08e7396897035bf69fcd535957190b18c7495ac4167c9bdb0af1a49a278c719"
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.422134 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7","Type":"ContainerStarted","Data":"be4b19b044f5a9e7a9f93745ce27d7018083d4e1a0a9385de3b6fb62e1968c17"}
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.424644 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-mq88x" event={"ID":"6e5767ed-9248-4df0-945f-817154feb33c","Type":"ContainerDied","Data":"3e843af8a46c232fc596586728d57b57b32faf3683eca0ebbfca9eb3b53c0912"}
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.424671 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3e843af8a46c232fc596586728d57b57b32faf3683eca0ebbfca9eb3b53c0912"
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.424741 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-mq88x"
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.433788 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-kgxrr" event={"ID":"53cae36d-5253-41e8-b6a8-b716251729b6","Type":"ContainerDied","Data":"b6b338206267e0c096f784948afe7b49c904c3227f48911648c7264f349f83d3"}
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.433840 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b6b338206267e0c096f784948afe7b49c904c3227f48911648c7264f349f83d3"
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.433911 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-kgxrr"
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.466931 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6e5767ed-9248-4df0-945f-817154feb33c-scripts\") pod \"6e5767ed-9248-4df0-945f-817154feb33c\" (UID: \"6e5767ed-9248-4df0-945f-817154feb33c\") "
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.467013 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m5t7q\" (UniqueName: \"kubernetes.io/projected/d226b614-be9f-4d60-a7b4-f6ce81a8753f-kube-api-access-m5t7q\") pod \"d226b614-be9f-4d60-a7b4-f6ce81a8753f\" (UID: \"d226b614-be9f-4d60-a7b4-f6ce81a8753f\") "
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.467058 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/53cae36d-5253-41e8-b6a8-b716251729b6-operator-scripts\") pod \"53cae36d-5253-41e8-b6a8-b716251729b6\" (UID: \"53cae36d-5253-41e8-b6a8-b716251729b6\") "
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.467129 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/6e5767ed-9248-4df0-945f-817154feb33c-dispersionconf\") pod \"6e5767ed-9248-4df0-945f-817154feb33c\" (UID: \"6e5767ed-9248-4df0-945f-817154feb33c\") "
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.467170 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d226b614-be9f-4d60-a7b4-f6ce81a8753f-operator-scripts\") pod \"d226b614-be9f-4d60-a7b4-f6ce81a8753f\" (UID: \"d226b614-be9f-4d60-a7b4-f6ce81a8753f\") "
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.467203 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/6e5767ed-9248-4df0-945f-817154feb33c-swiftconf\") pod \"6e5767ed-9248-4df0-945f-817154feb33c\" (UID: \"6e5767ed-9248-4df0-945f-817154feb33c\") "
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.467295 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zbvpq\" (UniqueName: \"kubernetes.io/projected/53cae36d-5253-41e8-b6a8-b716251729b6-kube-api-access-zbvpq\") pod \"53cae36d-5253-41e8-b6a8-b716251729b6\" (UID: \"53cae36d-5253-41e8-b6a8-b716251729b6\") "
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.467344 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/6e5767ed-9248-4df0-945f-817154feb33c-etc-swift\") pod \"6e5767ed-9248-4df0-945f-817154feb33c\" (UID: \"6e5767ed-9248-4df0-945f-817154feb33c\") "
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.467422 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/6e5767ed-9248-4df0-945f-817154feb33c-ring-data-devices\") pod \"6e5767ed-9248-4df0-945f-817154feb33c\" (UID: \"6e5767ed-9248-4df0-945f-817154feb33c\") "
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.467514 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r8fvw\" (UniqueName: \"kubernetes.io/projected/6e5767ed-9248-4df0-945f-817154feb33c-kube-api-access-r8fvw\") pod \"6e5767ed-9248-4df0-945f-817154feb33c\" (UID: \"6e5767ed-9248-4df0-945f-817154feb33c\") "
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.467570 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e5767ed-9248-4df0-945f-817154feb33c-combined-ca-bundle\") pod \"6e5767ed-9248-4df0-945f-817154feb33c\" (UID: \"6e5767ed-9248-4df0-945f-817154feb33c\") "
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.468540 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6e5767ed-9248-4df0-945f-817154feb33c-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "6e5767ed-9248-4df0-945f-817154feb33c" (UID: "6e5767ed-9248-4df0-945f-817154feb33c"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.468634 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6e5767ed-9248-4df0-945f-817154feb33c-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "6e5767ed-9248-4df0-945f-817154feb33c" (UID: "6e5767ed-9248-4df0-945f-817154feb33c"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.468948 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/53cae36d-5253-41e8-b6a8-b716251729b6-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "53cae36d-5253-41e8-b6a8-b716251729b6" (UID: "53cae36d-5253-41e8-b6a8-b716251729b6"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.469188 4769 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/53cae36d-5253-41e8-b6a8-b716251729b6-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.469206 4769 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/6e5767ed-9248-4df0-945f-817154feb33c-etc-swift\") on node \"crc\" DevicePath \"\""
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.469220 4769 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/6e5767ed-9248-4df0-945f-817154feb33c-ring-data-devices\") on node \"crc\" DevicePath \"\""
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.469231 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k5xsf\" (UniqueName: \"kubernetes.io/projected/dbee15e7-80d1-4d49-8da5-ea268221ed4f-kube-api-access-k5xsf\") on node \"crc\" DevicePath \"\""
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.471531 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d226b614-be9f-4d60-a7b4-f6ce81a8753f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d226b614-be9f-4d60-a7b4-f6ce81a8753f" (UID: "d226b614-be9f-4d60-a7b4-f6ce81a8753f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.483047 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d226b614-be9f-4d60-a7b4-f6ce81a8753f-kube-api-access-m5t7q" (OuterVolumeSpecName: "kube-api-access-m5t7q") pod "d226b614-be9f-4d60-a7b4-f6ce81a8753f" (UID: "d226b614-be9f-4d60-a7b4-f6ce81a8753f"). InnerVolumeSpecName "kube-api-access-m5t7q". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.483884 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e5767ed-9248-4df0-945f-817154feb33c-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "6e5767ed-9248-4df0-945f-817154feb33c" (UID: "6e5767ed-9248-4df0-945f-817154feb33c"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.487928 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/53cae36d-5253-41e8-b6a8-b716251729b6-kube-api-access-zbvpq" (OuterVolumeSpecName: "kube-api-access-zbvpq") pod "53cae36d-5253-41e8-b6a8-b716251729b6" (UID: "53cae36d-5253-41e8-b6a8-b716251729b6"). InnerVolumeSpecName "kube-api-access-zbvpq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.492337 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6e5767ed-9248-4df0-945f-817154feb33c-kube-api-access-r8fvw" (OuterVolumeSpecName: "kube-api-access-r8fvw") pod "6e5767ed-9248-4df0-945f-817154feb33c" (UID: "6e5767ed-9248-4df0-945f-817154feb33c"). InnerVolumeSpecName "kube-api-access-r8fvw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.508138 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e5767ed-9248-4df0-945f-817154feb33c-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "6e5767ed-9248-4df0-945f-817154feb33c" (UID: "6e5767ed-9248-4df0-945f-817154feb33c"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.508245 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6e5767ed-9248-4df0-945f-817154feb33c-scripts" (OuterVolumeSpecName: "scripts") pod "6e5767ed-9248-4df0-945f-817154feb33c" (UID: "6e5767ed-9248-4df0-945f-817154feb33c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.510857 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e5767ed-9248-4df0-945f-817154feb33c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6e5767ed-9248-4df0-945f-817154feb33c" (UID: "6e5767ed-9248-4df0-945f-817154feb33c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.570374 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zbvpq\" (UniqueName: \"kubernetes.io/projected/53cae36d-5253-41e8-b6a8-b716251729b6-kube-api-access-zbvpq\") on node \"crc\" DevicePath \"\""
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.570417 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r8fvw\" (UniqueName: \"kubernetes.io/projected/6e5767ed-9248-4df0-945f-817154feb33c-kube-api-access-r8fvw\") on node \"crc\" DevicePath \"\""
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.570431 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e5767ed-9248-4df0-945f-817154feb33c-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.570445 4769 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6e5767ed-9248-4df0-945f-817154feb33c-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.570456 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m5t7q\" (UniqueName: \"kubernetes.io/projected/d226b614-be9f-4d60-a7b4-f6ce81a8753f-kube-api-access-m5t7q\") on node \"crc\" DevicePath \"\""
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.570469 4769 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/6e5767ed-9248-4df0-945f-817154feb33c-dispersionconf\") on node \"crc\" DevicePath \"\""
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.570481 4769 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d226b614-be9f-4d60-a7b4-f6ce81a8753f-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.570493 4769 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/6e5767ed-9248-4df0-945f-817154feb33c-swiftconf\") on node \"crc\" DevicePath \"\""
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.624140 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-2g226"]
Nov 25 10:05:26 crc kubenswrapper[4769]: W1125 10:05:26.626225 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb4286152_9aa2_4d10_94b0_0676b434dc03.slice/crio-39c4a1fdb268cc6751bb3de62d1b2bbfa4d25f3b02607bf7b18dca1ce4753a18 WatchSource:0}: Error finding container 39c4a1fdb268cc6751bb3de62d1b2bbfa4d25f3b02607bf7b18dca1ce4753a18: Status 404 returned error can't find the container with id 39c4a1fdb268cc6751bb3de62d1b2bbfa4d25f3b02607bf7b18dca1ce4753a18
Nov 25 10:05:26 crc kubenswrapper[4769]: I1125 10:05:26.732115 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-966e-account-create-cd5fr"]
Nov 25 10:05:26 crc kubenswrapper[4769]: W1125 10:05:26.738277 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode2f8ec92_08ca_40fd_b130_33cbd71ab39b.slice/crio-7f3b724d383760d43a3d70805fffa06b36b747fa800e2c717df906e5d3219413 WatchSource:0}: Error finding container 7f3b724d383760d43a3d70805fffa06b36b747fa800e2c717df906e5d3219413: Status 404 returned error can't find the container with id 7f3b724d383760d43a3d70805fffa06b36b747fa800e2c717df906e5d3219413
Nov 25 10:05:27 crc kubenswrapper[4769]: I1125 10:05:27.243215 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-698758b865-2tlt4"
Nov 25 10:05:27 crc kubenswrapper[4769]: I1125 10:05:27.388355 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7cb5889db5-72c4b"]
Nov 25 10:05:27 crc kubenswrapper[4769]: I1125 10:05:27.388717 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7cb5889db5-72c4b" podUID="bb46db53-8238-4089-9d9c-e4e70675ba4d" containerName="dnsmasq-dns" containerID="cri-o://c03d7c62951bc39dc0c31a60b12ee7e850873b45ef6a1b0b7c6eadc1199421e6" gracePeriod=10
Nov 25 10:05:27 crc kubenswrapper[4769]: I1125 10:05:27.447844 4769 generic.go:334] "Generic (PLEG): container finished" podID="e2f8ec92-08ca-40fd-b130-33cbd71ab39b" containerID="2cb2b538777f7251522fd0b7f4abf05485ab29a97c999e901c40d54906640f7e" exitCode=0
Nov 25 10:05:27 crc kubenswrapper[4769]: I1125 10:05:27.447948 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-966e-account-create-cd5fr" event={"ID":"e2f8ec92-08ca-40fd-b130-33cbd71ab39b","Type":"ContainerDied","Data":"2cb2b538777f7251522fd0b7f4abf05485ab29a97c999e901c40d54906640f7e"}
Nov 25 10:05:27 crc kubenswrapper[4769]: I1125 10:05:27.448006 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-966e-account-create-cd5fr" event={"ID":"e2f8ec92-08ca-40fd-b130-33cbd71ab39b","Type":"ContainerStarted","Data":"7f3b724d383760d43a3d70805fffa06b36b747fa800e2c717df906e5d3219413"}
Nov 25 10:05:27 crc kubenswrapper[4769]: I1125 10:05:27.450535 4769 generic.go:334] "Generic (PLEG): container finished" podID="b4286152-9aa2-4d10-94b0-0676b434dc03" containerID="db6cf4b655ec479d27626be2cd2afbc3295defb227fcf96069846346f0c90ca6" exitCode=0
Nov 25 10:05:27 crc kubenswrapper[4769]: I1125 10:05:27.450567 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-2g226" event={"ID":"b4286152-9aa2-4d10-94b0-0676b434dc03","Type":"ContainerDied","Data":"db6cf4b655ec479d27626be2cd2afbc3295defb227fcf96069846346f0c90ca6"}
Nov 25 10:05:27 crc kubenswrapper[4769]: I1125 10:05:27.450584 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-2g226" event={"ID":"b4286152-9aa2-4d10-94b0-0676b434dc03","Type":"ContainerStarted","Data":"39c4a1fdb268cc6751bb3de62d1b2bbfa4d25f3b02607bf7b18dca1ce4753a18"}
Nov 25 10:05:27 crc kubenswrapper[4769]: I1125 10:05:27.855676 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-cjxql"]
Nov 25 10:05:27 crc kubenswrapper[4769]: E1125 10:05:27.856652 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbee15e7-80d1-4d49-8da5-ea268221ed4f" containerName="mariadb-account-create"
Nov 25 10:05:27 crc kubenswrapper[4769]: I1125 10:05:27.856673 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbee15e7-80d1-4d49-8da5-ea268221ed4f" containerName="mariadb-account-create"
Nov 25 10:05:27 crc kubenswrapper[4769]: E1125 10:05:27.856700 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e5767ed-9248-4df0-945f-817154feb33c" containerName="swift-ring-rebalance"
Nov 25 10:05:27 crc kubenswrapper[4769]: I1125 10:05:27.856707 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e5767ed-9248-4df0-945f-817154feb33c" containerName="swift-ring-rebalance"
Nov 25 10:05:27 crc kubenswrapper[4769]: E1125 10:05:27.856746 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d226b614-be9f-4d60-a7b4-f6ce81a8753f" containerName="mariadb-account-create"
Nov 25 10:05:27 crc kubenswrapper[4769]: I1125 10:05:27.856778 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="d226b614-be9f-4d60-a7b4-f6ce81a8753f" containerName="mariadb-account-create"
Nov 25 10:05:27 crc kubenswrapper[4769]: E1125 10:05:27.856791 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53cae36d-5253-41e8-b6a8-b716251729b6" containerName="mariadb-database-create"
Nov 25 10:05:27 crc kubenswrapper[4769]: I1125 10:05:27.856796 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="53cae36d-5253-41e8-b6a8-b716251729b6" containerName="mariadb-database-create"
Nov 25 10:05:27 crc kubenswrapper[4769]: I1125 10:05:27.857006 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="dbee15e7-80d1-4d49-8da5-ea268221ed4f" containerName="mariadb-account-create"
Nov 25 10:05:27 crc kubenswrapper[4769]: I1125 10:05:27.857038 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e5767ed-9248-4df0-945f-817154feb33c" containerName="swift-ring-rebalance"
Nov 25 10:05:27 crc kubenswrapper[4769]: I1125 10:05:27.857075 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="53cae36d-5253-41e8-b6a8-b716251729b6" containerName="mariadb-database-create"
Nov 25 10:05:27 crc kubenswrapper[4769]: I1125 10:05:27.857092 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="d226b614-be9f-4d60-a7b4-f6ce81a8753f" containerName="mariadb-account-create"
Nov 25 10:05:27 crc kubenswrapper[4769]: I1125 10:05:27.858049 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-cjxql"
Nov 25 10:05:27 crc kubenswrapper[4769]: I1125 10:05:27.879021 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-cjxql"]
Nov 25 10:05:27 crc kubenswrapper[4769]: I1125 10:05:27.904989 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/28837799-87a3-4a15-aaeb-91c949f21ef5-operator-scripts\") pod \"keystone-db-create-cjxql\" (UID: \"28837799-87a3-4a15-aaeb-91c949f21ef5\") " pod="openstack/keystone-db-create-cjxql"
Nov 25 10:05:27 crc kubenswrapper[4769]: I1125 10:05:27.905459 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s2vw5\" (UniqueName: \"kubernetes.io/projected/28837799-87a3-4a15-aaeb-91c949f21ef5-kube-api-access-s2vw5\") pod \"keystone-db-create-cjxql\" (UID: \"28837799-87a3-4a15-aaeb-91c949f21ef5\") " pod="openstack/keystone-db-create-cjxql"
Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.004726 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-24df-account-create-vj7tp"]
Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.006501 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-24df-account-create-vj7tp"
Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.007084 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2vw5\" (UniqueName: \"kubernetes.io/projected/28837799-87a3-4a15-aaeb-91c949f21ef5-kube-api-access-s2vw5\") pod \"keystone-db-create-cjxql\" (UID: \"28837799-87a3-4a15-aaeb-91c949f21ef5\") " pod="openstack/keystone-db-create-cjxql"
Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.007155 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/28837799-87a3-4a15-aaeb-91c949f21ef5-operator-scripts\") pod \"keystone-db-create-cjxql\" (UID: \"28837799-87a3-4a15-aaeb-91c949f21ef5\") " pod="openstack/keystone-db-create-cjxql"
Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.008020 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/28837799-87a3-4a15-aaeb-91c949f21ef5-operator-scripts\") pod \"keystone-db-create-cjxql\" (UID: \"28837799-87a3-4a15-aaeb-91c949f21ef5\") " pod="openstack/keystone-db-create-cjxql"
Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.009621 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret"
Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.020925 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-24df-account-create-vj7tp"]
Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.035389 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7cb5889db5-72c4b"
Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.045827 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2vw5\" (UniqueName: \"kubernetes.io/projected/28837799-87a3-4a15-aaeb-91c949f21ef5-kube-api-access-s2vw5\") pod \"keystone-db-create-cjxql\" (UID: \"28837799-87a3-4a15-aaeb-91c949f21ef5\") " pod="openstack/keystone-db-create-cjxql"
Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.108937 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zr6cp\" (UniqueName: \"kubernetes.io/projected/7d7f8889-8ac3-4fbd-83be-743fd53cea7a-kube-api-access-zr6cp\") pod \"keystone-24df-account-create-vj7tp\" (UID: \"7d7f8889-8ac3-4fbd-83be-743fd53cea7a\") " pod="openstack/keystone-24df-account-create-vj7tp"
Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.109155 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7d7f8889-8ac3-4fbd-83be-743fd53cea7a-operator-scripts\") pod \"keystone-24df-account-create-vj7tp\" (UID: \"7d7f8889-8ac3-4fbd-83be-743fd53cea7a\") " pod="openstack/keystone-24df-account-create-vj7tp"
Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.180494 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-rlvtg"]
Nov 25 10:05:28 crc kubenswrapper[4769]: E1125 10:05:28.181027 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb46db53-8238-4089-9d9c-e4e70675ba4d" containerName="dnsmasq-dns"
Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.181049 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb46db53-8238-4089-9d9c-e4e70675ba4d" containerName="dnsmasq-dns"
Nov 25 10:05:28 crc kubenswrapper[4769]: E1125 10:05:28.181060 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb46db53-8238-4089-9d9c-e4e70675ba4d" containerName="init"
Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.181067 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb46db53-8238-4089-9d9c-e4e70675ba4d" containerName="init"
Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.181295 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="bb46db53-8238-4089-9d9c-e4e70675ba4d" containerName="dnsmasq-dns"
Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.181471 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-cjxql"
Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.182076 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-rlvtg"
Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.199163 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-rlvtg"]
Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.210053 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bb46db53-8238-4089-9d9c-e4e70675ba4d-dns-svc\") pod \"bb46db53-8238-4089-9d9c-e4e70675ba4d\" (UID: \"bb46db53-8238-4089-9d9c-e4e70675ba4d\") "
Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.210135 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sn4gd\" (UniqueName: \"kubernetes.io/projected/bb46db53-8238-4089-9d9c-e4e70675ba4d-kube-api-access-sn4gd\") pod \"bb46db53-8238-4089-9d9c-e4e70675ba4d\" (UID: \"bb46db53-8238-4089-9d9c-e4e70675ba4d\") "
Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.210519 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bb46db53-8238-4089-9d9c-e4e70675ba4d-config\") pod \"bb46db53-8238-4089-9d9c-e4e70675ba4d\" (UID: \"bb46db53-8238-4089-9d9c-e4e70675ba4d\") "
Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.210973 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7d7f8889-8ac3-4fbd-83be-743fd53cea7a-operator-scripts\") pod \"keystone-24df-account-create-vj7tp\" (UID: \"7d7f8889-8ac3-4fbd-83be-743fd53cea7a\") " pod="openstack/keystone-24df-account-create-vj7tp"
Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.211065 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zr6cp\" (UniqueName: \"kubernetes.io/projected/7d7f8889-8ac3-4fbd-83be-743fd53cea7a-kube-api-access-zr6cp\") pod \"keystone-24df-account-create-vj7tp\" (UID: \"7d7f8889-8ac3-4fbd-83be-743fd53cea7a\") " pod="openstack/keystone-24df-account-create-vj7tp"
Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.212646 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7d7f8889-8ac3-4fbd-83be-743fd53cea7a-operator-scripts\") pod \"keystone-24df-account-create-vj7tp\" (UID: \"7d7f8889-8ac3-4fbd-83be-743fd53cea7a\") " pod="openstack/keystone-24df-account-create-vj7tp"
Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.220984 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bb46db53-8238-4089-9d9c-e4e70675ba4d-kube-api-access-sn4gd" (OuterVolumeSpecName: "kube-api-access-sn4gd") pod "bb46db53-8238-4089-9d9c-e4e70675ba4d" (UID: "bb46db53-8238-4089-9d9c-e4e70675ba4d"). InnerVolumeSpecName "kube-api-access-sn4gd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.240194 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zr6cp\" (UniqueName: \"kubernetes.io/projected/7d7f8889-8ac3-4fbd-83be-743fd53cea7a-kube-api-access-zr6cp\") pod \"keystone-24df-account-create-vj7tp\" (UID: \"7d7f8889-8ac3-4fbd-83be-743fd53cea7a\") " pod="openstack/keystone-24df-account-create-vj7tp"
Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.273933 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bb46db53-8238-4089-9d9c-e4e70675ba4d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "bb46db53-8238-4089-9d9c-e4e70675ba4d" (UID: "bb46db53-8238-4089-9d9c-e4e70675ba4d"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.300154 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bb46db53-8238-4089-9d9c-e4e70675ba4d-config" (OuterVolumeSpecName: "config") pod "bb46db53-8238-4089-9d9c-e4e70675ba4d" (UID: "bb46db53-8238-4089-9d9c-e4e70675ba4d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.314801 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mbjfm\" (UniqueName: \"kubernetes.io/projected/783756da-1dfb-49b3-9472-d33844dffc95-kube-api-access-mbjfm\") pod \"placement-db-create-rlvtg\" (UID: \"783756da-1dfb-49b3-9472-d33844dffc95\") " pod="openstack/placement-db-create-rlvtg"
Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.315094 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/783756da-1dfb-49b3-9472-d33844dffc95-operator-scripts\") pod \"placement-db-create-rlvtg\" (UID: \"783756da-1dfb-49b3-9472-d33844dffc95\") " pod="openstack/placement-db-create-rlvtg"
Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.315283 4769 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bb46db53-8238-4089-9d9c-e4e70675ba4d-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.315309 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sn4gd\" (UniqueName: \"kubernetes.io/projected/bb46db53-8238-4089-9d9c-e4e70675ba4d-kube-api-access-sn4gd\") on node \"crc\" DevicePath \"\""
Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.315329 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bb46db53-8238-4089-9d9c-e4e70675ba4d-config\") on node \"crc\" DevicePath \"\""
Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.339111 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-24df-account-create-vj7tp" Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.417951 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/783756da-1dfb-49b3-9472-d33844dffc95-operator-scripts\") pod \"placement-db-create-rlvtg\" (UID: \"783756da-1dfb-49b3-9472-d33844dffc95\") " pod="openstack/placement-db-create-rlvtg" Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.418589 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mbjfm\" (UniqueName: \"kubernetes.io/projected/783756da-1dfb-49b3-9472-d33844dffc95-kube-api-access-mbjfm\") pod \"placement-db-create-rlvtg\" (UID: \"783756da-1dfb-49b3-9472-d33844dffc95\") " pod="openstack/placement-db-create-rlvtg" Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.419393 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/783756da-1dfb-49b3-9472-d33844dffc95-operator-scripts\") pod \"placement-db-create-rlvtg\" (UID: \"783756da-1dfb-49b3-9472-d33844dffc95\") " pod="openstack/placement-db-create-rlvtg" Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.471491 4769 generic.go:334] "Generic (PLEG): container finished" podID="bb46db53-8238-4089-9d9c-e4e70675ba4d" containerID="c03d7c62951bc39dc0c31a60b12ee7e850873b45ef6a1b0b7c6eadc1199421e6" exitCode=0 Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.471731 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7cb5889db5-72c4b" Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.471828 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7cb5889db5-72c4b" event={"ID":"bb46db53-8238-4089-9d9c-e4e70675ba4d","Type":"ContainerDied","Data":"c03d7c62951bc39dc0c31a60b12ee7e850873b45ef6a1b0b7c6eadc1199421e6"} Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.471908 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7cb5889db5-72c4b" event={"ID":"bb46db53-8238-4089-9d9c-e4e70675ba4d","Type":"ContainerDied","Data":"8e167e393f2f86d3aa891804098ebbebf923154a4c1f6541680ce354f19d7bcb"} Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.471933 4769 scope.go:117] "RemoveContainer" containerID="c03d7c62951bc39dc0c31a60b12ee7e850873b45ef6a1b0b7c6eadc1199421e6" Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.490258 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mbjfm\" (UniqueName: \"kubernetes.io/projected/783756da-1dfb-49b3-9472-d33844dffc95-kube-api-access-mbjfm\") pod \"placement-db-create-rlvtg\" (UID: \"783756da-1dfb-49b3-9472-d33844dffc95\") " pod="openstack/placement-db-create-rlvtg" Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.540484 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7cb5889db5-72c4b"] Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.550431 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7cb5889db5-72c4b"] Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.568159 4769 scope.go:117] "RemoveContainer" containerID="eed2ce8ef5b37720cd1ddd1769e6c46374cc165f6e43cb5927140c0586613841" Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.653162 4769 scope.go:117] "RemoveContainer" 
containerID="c03d7c62951bc39dc0c31a60b12ee7e850873b45ef6a1b0b7c6eadc1199421e6" Nov 25 10:05:28 crc kubenswrapper[4769]: E1125 10:05:28.659654 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c03d7c62951bc39dc0c31a60b12ee7e850873b45ef6a1b0b7c6eadc1199421e6\": container with ID starting with c03d7c62951bc39dc0c31a60b12ee7e850873b45ef6a1b0b7c6eadc1199421e6 not found: ID does not exist" containerID="c03d7c62951bc39dc0c31a60b12ee7e850873b45ef6a1b0b7c6eadc1199421e6" Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.659716 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c03d7c62951bc39dc0c31a60b12ee7e850873b45ef6a1b0b7c6eadc1199421e6"} err="failed to get container status \"c03d7c62951bc39dc0c31a60b12ee7e850873b45ef6a1b0b7c6eadc1199421e6\": rpc error: code = NotFound desc = could not find container \"c03d7c62951bc39dc0c31a60b12ee7e850873b45ef6a1b0b7c6eadc1199421e6\": container with ID starting with c03d7c62951bc39dc0c31a60b12ee7e850873b45ef6a1b0b7c6eadc1199421e6 not found: ID does not exist" Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.659756 4769 scope.go:117] "RemoveContainer" containerID="eed2ce8ef5b37720cd1ddd1769e6c46374cc165f6e43cb5927140c0586613841" Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.660847 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-rlvtg" Nov 25 10:05:28 crc kubenswrapper[4769]: E1125 10:05:28.661902 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eed2ce8ef5b37720cd1ddd1769e6c46374cc165f6e43cb5927140c0586613841\": container with ID starting with eed2ce8ef5b37720cd1ddd1769e6c46374cc165f6e43cb5927140c0586613841 not found: ID does not exist" containerID="eed2ce8ef5b37720cd1ddd1769e6c46374cc165f6e43cb5927140c0586613841" Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.661936 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eed2ce8ef5b37720cd1ddd1769e6c46374cc165f6e43cb5927140c0586613841"} err="failed to get container status \"eed2ce8ef5b37720cd1ddd1769e6c46374cc165f6e43cb5927140c0586613841\": rpc error: code = NotFound desc = could not find container \"eed2ce8ef5b37720cd1ddd1769e6c46374cc165f6e43cb5927140c0586613841\": container with ID starting with eed2ce8ef5b37720cd1ddd1769e6c46374cc165f6e43cb5927140c0586613841 not found: ID does not exist" Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.715626 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-cjxql"] Nov 25 10:05:28 crc kubenswrapper[4769]: W1125 10:05:28.799356 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod28837799_87a3_4a15_aaeb_91c949f21ef5.slice/crio-9ffe9fa2c6fe4fc99a5ccd9500a68eb4f213a637eef17fda4ff56d714cc3acaf WatchSource:0}: Error finding container 9ffe9fa2c6fe4fc99a5ccd9500a68eb4f213a637eef17fda4ff56d714cc3acaf: Status 404 returned error can't find the container with id 9ffe9fa2c6fe4fc99a5ccd9500a68eb4f213a637eef17fda4ff56d714cc3acaf Nov 25 10:05:28 crc kubenswrapper[4769]: I1125 10:05:28.901229 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-24df-account-create-vj7tp"] Nov 25 10:05:28 crc kubenswrapper[4769]: W1125 10:05:28.944020 4769 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7d7f8889_8ac3_4fbd_83be_743fd53cea7a.slice/crio-7873b6597ead7f68ba26af8ce498b049e36d8665fd2abb327d9fdc68714d3b28 WatchSource:0}: Error finding container 7873b6597ead7f68ba26af8ce498b049e36d8665fd2abb327d9fdc68714d3b28: Status 404 returned error can't find the container with id 7873b6597ead7f68ba26af8ce498b049e36d8665fd2abb327d9fdc68714d3b28 Nov 25 10:05:29 crc kubenswrapper[4769]: I1125 10:05:29.310701 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-966e-account-create-cd5fr" Nov 25 10:05:29 crc kubenswrapper[4769]: I1125 10:05:29.329080 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-2g226" Nov 25 10:05:29 crc kubenswrapper[4769]: W1125 10:05:29.366098 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod783756da_1dfb_49b3_9472_d33844dffc95.slice/crio-750ce580ec9b478f3b6f8332bbacb8fd5571aafcd92e0192a1ac7eec21eee6d4 WatchSource:0}: Error finding container 750ce580ec9b478f3b6f8332bbacb8fd5571aafcd92e0192a1ac7eec21eee6d4: Status 404 returned error can't find the container with id 750ce580ec9b478f3b6f8332bbacb8fd5571aafcd92e0192a1ac7eec21eee6d4 Nov 25 10:05:29 crc kubenswrapper[4769]: I1125 10:05:29.407257 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-rlvtg"] Nov 25 10:05:29 crc kubenswrapper[4769]: I1125 10:05:29.457079 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8bpcn\" (UniqueName: \"kubernetes.io/projected/b4286152-9aa2-4d10-94b0-0676b434dc03-kube-api-access-8bpcn\") pod \"b4286152-9aa2-4d10-94b0-0676b434dc03\" (UID: \"b4286152-9aa2-4d10-94b0-0676b434dc03\") " Nov 25 10:05:29 crc kubenswrapper[4769]: I1125 10:05:29.457192 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b4286152-9aa2-4d10-94b0-0676b434dc03-operator-scripts\") pod \"b4286152-9aa2-4d10-94b0-0676b434dc03\" (UID: \"b4286152-9aa2-4d10-94b0-0676b434dc03\") " Nov 25 10:05:29 crc kubenswrapper[4769]: I1125 10:05:29.457243 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vgtjg\" (UniqueName: \"kubernetes.io/projected/e2f8ec92-08ca-40fd-b130-33cbd71ab39b-kube-api-access-vgtjg\") pod \"e2f8ec92-08ca-40fd-b130-33cbd71ab39b\" (UID: \"e2f8ec92-08ca-40fd-b130-33cbd71ab39b\") " Nov 25 10:05:29 crc kubenswrapper[4769]: I1125 10:05:29.457619 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e2f8ec92-08ca-40fd-b130-33cbd71ab39b-operator-scripts\") pod \"e2f8ec92-08ca-40fd-b130-33cbd71ab39b\" (UID: \"e2f8ec92-08ca-40fd-b130-33cbd71ab39b\") " Nov 25 10:05:29 crc kubenswrapper[4769]: I1125 10:05:29.459103 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b4286152-9aa2-4d10-94b0-0676b434dc03-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b4286152-9aa2-4d10-94b0-0676b434dc03" (UID: "b4286152-9aa2-4d10-94b0-0676b434dc03"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:05:29 crc kubenswrapper[4769]: I1125 10:05:29.459162 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e2f8ec92-08ca-40fd-b130-33cbd71ab39b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e2f8ec92-08ca-40fd-b130-33cbd71ab39b" (UID: "e2f8ec92-08ca-40fd-b130-33cbd71ab39b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:05:29 crc kubenswrapper[4769]: I1125 10:05:29.466755 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e2f8ec92-08ca-40fd-b130-33cbd71ab39b-kube-api-access-vgtjg" (OuterVolumeSpecName: "kube-api-access-vgtjg") pod "e2f8ec92-08ca-40fd-b130-33cbd71ab39b" (UID: "e2f8ec92-08ca-40fd-b130-33cbd71ab39b"). InnerVolumeSpecName "kube-api-access-vgtjg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:05:29 crc kubenswrapper[4769]: I1125 10:05:29.472173 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4286152-9aa2-4d10-94b0-0676b434dc03-kube-api-access-8bpcn" (OuterVolumeSpecName: "kube-api-access-8bpcn") pod "b4286152-9aa2-4d10-94b0-0676b434dc03" (UID: "b4286152-9aa2-4d10-94b0-0676b434dc03"). InnerVolumeSpecName "kube-api-access-8bpcn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:05:29 crc kubenswrapper[4769]: I1125 10:05:29.527742 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-2g226" event={"ID":"b4286152-9aa2-4d10-94b0-0676b434dc03","Type":"ContainerDied","Data":"39c4a1fdb268cc6751bb3de62d1b2bbfa4d25f3b02607bf7b18dca1ce4753a18"} Nov 25 10:05:29 crc kubenswrapper[4769]: I1125 10:05:29.527803 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="39c4a1fdb268cc6751bb3de62d1b2bbfa4d25f3b02607bf7b18dca1ce4753a18" Nov 25 10:05:29 crc kubenswrapper[4769]: I1125 10:05:29.527902 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-2g226" Nov 25 10:05:29 crc kubenswrapper[4769]: I1125 10:05:29.544602 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-rlvtg" event={"ID":"783756da-1dfb-49b3-9472-d33844dffc95","Type":"ContainerStarted","Data":"750ce580ec9b478f3b6f8332bbacb8fd5571aafcd92e0192a1ac7eec21eee6d4"} Nov 25 10:05:29 crc kubenswrapper[4769]: I1125 10:05:29.546730 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-cjxql" event={"ID":"28837799-87a3-4a15-aaeb-91c949f21ef5","Type":"ContainerStarted","Data":"db03c10ecd567a8fa81fc8798c65b6894aae2cbacd881d33f3229c4043ad762e"} Nov 25 10:05:29 crc kubenswrapper[4769]: I1125 10:05:29.546763 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-cjxql" event={"ID":"28837799-87a3-4a15-aaeb-91c949f21ef5","Type":"ContainerStarted","Data":"9ffe9fa2c6fe4fc99a5ccd9500a68eb4f213a637eef17fda4ff56d714cc3acaf"} Nov 25 10:05:29 crc kubenswrapper[4769]: I1125 10:05:29.553477 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-24df-account-create-vj7tp" event={"ID":"7d7f8889-8ac3-4fbd-83be-743fd53cea7a","Type":"ContainerStarted","Data":"945cecfcbbaf9212d21bdec44eb73539b745cc966dcd1ec4383256a7d0c35c5c"} Nov 25 10:05:29 crc kubenswrapper[4769]: I1125 10:05:29.553509 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-24df-account-create-vj7tp" event={"ID":"7d7f8889-8ac3-4fbd-83be-743fd53cea7a","Type":"ContainerStarted","Data":"7873b6597ead7f68ba26af8ce498b049e36d8665fd2abb327d9fdc68714d3b28"} Nov 25 10:05:29 crc kubenswrapper[4769]: I1125 10:05:29.564379 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8bpcn\" (UniqueName: \"kubernetes.io/projected/b4286152-9aa2-4d10-94b0-0676b434dc03-kube-api-access-8bpcn\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:29 crc kubenswrapper[4769]: I1125 10:05:29.564418 4769 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b4286152-9aa2-4d10-94b0-0676b434dc03-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:29 crc kubenswrapper[4769]: I1125 10:05:29.564431 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vgtjg\" (UniqueName: \"kubernetes.io/projected/e2f8ec92-08ca-40fd-b130-33cbd71ab39b-kube-api-access-vgtjg\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:29 crc kubenswrapper[4769]: I1125 10:05:29.564444 4769 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e2f8ec92-08ca-40fd-b130-33cbd71ab39b-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:29 crc kubenswrapper[4769]: I1125 10:05:29.567181 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-966e-account-create-cd5fr" event={"ID":"e2f8ec92-08ca-40fd-b130-33cbd71ab39b","Type":"ContainerDied","Data":"7f3b724d383760d43a3d70805fffa06b36b747fa800e2c717df906e5d3219413"} Nov 25 10:05:29 crc kubenswrapper[4769]: I1125 10:05:29.567245 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7f3b724d383760d43a3d70805fffa06b36b747fa800e2c717df906e5d3219413" Nov 25 10:05:29 crc kubenswrapper[4769]: I1125 10:05:29.567345 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-966e-account-create-cd5fr" Nov 25 10:05:29 crc kubenswrapper[4769]: I1125 10:05:29.609212 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-24df-account-create-vj7tp" podStartSLOduration=2.609179962 podStartE2EDuration="2.609179962s" podCreationTimestamp="2025-11-25 10:05:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:05:29.587401225 +0000 UTC m=+1278.172373558" watchObservedRunningTime="2025-11-25 10:05:29.609179962 +0000 UTC m=+1278.194152285" Nov 25 10:05:30 crc kubenswrapper[4769]: I1125 10:05:30.253811 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bb46db53-8238-4089-9d9c-e4e70675ba4d" path="/var/lib/kubelet/pods/bb46db53-8238-4089-9d9c-e4e70675ba4d/volumes" Nov 25 10:05:30 crc kubenswrapper[4769]: I1125 10:05:30.577953 4769 generic.go:334] "Generic (PLEG): container finished" podID="783756da-1dfb-49b3-9472-d33844dffc95" containerID="e8b88b86a1cb53cee34b690bf43238a07397b0910e4170619117262274535577" exitCode=0 Nov 25 10:05:30 crc kubenswrapper[4769]: I1125 10:05:30.578084 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-rlvtg" event={"ID":"783756da-1dfb-49b3-9472-d33844dffc95","Type":"ContainerDied","Data":"e8b88b86a1cb53cee34b690bf43238a07397b0910e4170619117262274535577"} Nov 25 10:05:30 crc kubenswrapper[4769]: I1125 10:05:30.580323 4769 generic.go:334] "Generic (PLEG): container finished" podID="28837799-87a3-4a15-aaeb-91c949f21ef5" containerID="db03c10ecd567a8fa81fc8798c65b6894aae2cbacd881d33f3229c4043ad762e" exitCode=0 Nov 25 10:05:30 crc kubenswrapper[4769]: I1125 10:05:30.580394 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-cjxql" event={"ID":"28837799-87a3-4a15-aaeb-91c949f21ef5","Type":"ContainerDied","Data":"db03c10ecd567a8fa81fc8798c65b6894aae2cbacd881d33f3229c4043ad762e"} Nov 25 10:05:30 crc kubenswrapper[4769]: I1125 10:05:30.582799 4769 generic.go:334] "Generic (PLEG): container finished" podID="7d7f8889-8ac3-4fbd-83be-743fd53cea7a" containerID="945cecfcbbaf9212d21bdec44eb73539b745cc966dcd1ec4383256a7d0c35c5c" exitCode=0 Nov 25 10:05:30 crc kubenswrapper[4769]: I1125 10:05:30.582856 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-24df-account-create-vj7tp" event={"ID":"7d7f8889-8ac3-4fbd-83be-743fd53cea7a","Type":"ContainerDied","Data":"945cecfcbbaf9212d21bdec44eb73539b745cc966dcd1ec4383256a7d0c35c5c"} Nov 25 10:05:30 crc kubenswrapper[4769]: I1125 10:05:30.586402 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7","Type":"ContainerStarted","Data":"8e001632f7acf9d0ae2e2b9606945759d0ddd5febb51566939358a5d94eba13a"} Nov 25 10:05:30 crc kubenswrapper[4769]: I1125 10:05:30.690768 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-xxdkp"] Nov 25 10:05:30 crc kubenswrapper[4769]: E1125 10:05:30.691391 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2f8ec92-08ca-40fd-b130-33cbd71ab39b" containerName="mariadb-account-create" Nov 25 10:05:30 crc kubenswrapper[4769]: I1125 10:05:30.691416 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2f8ec92-08ca-40fd-b130-33cbd71ab39b" containerName="mariadb-account-create" Nov 25 10:05:30 crc kubenswrapper[4769]: E1125 
10:05:30.691426 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4286152-9aa2-4d10-94b0-0676b434dc03" containerName="mariadb-database-create" Nov 25 10:05:30 crc kubenswrapper[4769]: I1125 10:05:30.691433 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4286152-9aa2-4d10-94b0-0676b434dc03" containerName="mariadb-database-create" Nov 25 10:05:30 crc kubenswrapper[4769]: I1125 10:05:30.691678 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="e2f8ec92-08ca-40fd-b130-33cbd71ab39b" containerName="mariadb-account-create" Nov 25 10:05:30 crc kubenswrapper[4769]: I1125 10:05:30.691712 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4286152-9aa2-4d10-94b0-0676b434dc03" containerName="mariadb-database-create" Nov 25 10:05:30 crc kubenswrapper[4769]: I1125 10:05:30.692636 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-xxdkp" Nov 25 10:05:30 crc kubenswrapper[4769]: I1125 10:05:30.713167 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-xxdkp"] Nov 25 10:05:30 crc kubenswrapper[4769]: I1125 10:05:30.792434 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6cd9f\" (UniqueName: \"kubernetes.io/projected/9ff8162a-1d7e-4415-b231-5d09857454cb-kube-api-access-6cd9f\") pod \"mysqld-exporter-openstack-cell1-db-create-xxdkp\" (UID: \"9ff8162a-1d7e-4415-b231-5d09857454cb\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-xxdkp" Nov 25 10:05:30 crc kubenswrapper[4769]: I1125 10:05:30.792768 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9ff8162a-1d7e-4415-b231-5d09857454cb-operator-scripts\") pod \"mysqld-exporter-openstack-cell1-db-create-xxdkp\" (UID: \"9ff8162a-1d7e-4415-b231-5d09857454cb\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-xxdkp" Nov 25 10:05:30 crc kubenswrapper[4769]: I1125 10:05:30.895769 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6cd9f\" (UniqueName: \"kubernetes.io/projected/9ff8162a-1d7e-4415-b231-5d09857454cb-kube-api-access-6cd9f\") pod \"mysqld-exporter-openstack-cell1-db-create-xxdkp\" (UID: \"9ff8162a-1d7e-4415-b231-5d09857454cb\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-xxdkp" Nov 25 10:05:30 crc kubenswrapper[4769]: I1125 10:05:30.896587 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9ff8162a-1d7e-4415-b231-5d09857454cb-operator-scripts\") pod \"mysqld-exporter-openstack-cell1-db-create-xxdkp\" (UID: \"9ff8162a-1d7e-4415-b231-5d09857454cb\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-xxdkp" Nov 25 10:05:30 crc kubenswrapper[4769]: I1125 10:05:30.897370 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9ff8162a-1d7e-4415-b231-5d09857454cb-operator-scripts\") pod \"mysqld-exporter-openstack-cell1-db-create-xxdkp\" (UID: \"9ff8162a-1d7e-4415-b231-5d09857454cb\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-xxdkp" Nov 25 10:05:30 crc kubenswrapper[4769]: I1125 10:05:30.928921 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-9010-account-create-8r524"] 
Nov 25 10:05:30 crc kubenswrapper[4769]: I1125 10:05:30.930860 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-9010-account-create-8r524" Nov 25 10:05:30 crc kubenswrapper[4769]: I1125 10:05:30.931612 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6cd9f\" (UniqueName: \"kubernetes.io/projected/9ff8162a-1d7e-4415-b231-5d09857454cb-kube-api-access-6cd9f\") pod \"mysqld-exporter-openstack-cell1-db-create-xxdkp\" (UID: \"9ff8162a-1d7e-4415-b231-5d09857454cb\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-xxdkp" Nov 25 10:05:30 crc kubenswrapper[4769]: I1125 10:05:30.935398 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-openstack-cell1-db-secret" Nov 25 10:05:30 crc kubenswrapper[4769]: I1125 10:05:30.949921 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-9010-account-create-8r524"] Nov 25 10:05:31 crc kubenswrapper[4769]: I1125 10:05:31.026075 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-xxdkp" Nov 25 10:05:31 crc kubenswrapper[4769]: I1125 10:05:31.101566 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3bc94902-4625-4198-a28e-a52f51888eea-operator-scripts\") pod \"mysqld-exporter-9010-account-create-8r524\" (UID: \"3bc94902-4625-4198-a28e-a52f51888eea\") " pod="openstack/mysqld-exporter-9010-account-create-8r524" Nov 25 10:05:31 crc kubenswrapper[4769]: I1125 10:05:31.101813 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2r4zc\" (UniqueName: \"kubernetes.io/projected/3bc94902-4625-4198-a28e-a52f51888eea-kube-api-access-2r4zc\") pod \"mysqld-exporter-9010-account-create-8r524\" (UID: \"3bc94902-4625-4198-a28e-a52f51888eea\") " pod="openstack/mysqld-exporter-9010-account-create-8r524" Nov 25 10:05:31 crc kubenswrapper[4769]: I1125 10:05:31.182757 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-cjxql" Nov 25 10:05:31 crc kubenswrapper[4769]: I1125 10:05:31.204646 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3bc94902-4625-4198-a28e-a52f51888eea-operator-scripts\") pod \"mysqld-exporter-9010-account-create-8r524\" (UID: \"3bc94902-4625-4198-a28e-a52f51888eea\") " pod="openstack/mysqld-exporter-9010-account-create-8r524" Nov 25 10:05:31 crc kubenswrapper[4769]: I1125 10:05:31.206133 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2r4zc\" (UniqueName: \"kubernetes.io/projected/3bc94902-4625-4198-a28e-a52f51888eea-kube-api-access-2r4zc\") pod \"mysqld-exporter-9010-account-create-8r524\" (UID: \"3bc94902-4625-4198-a28e-a52f51888eea\") " pod="openstack/mysqld-exporter-9010-account-create-8r524" Nov 25 10:05:31 crc kubenswrapper[4769]: I1125 10:05:31.205932 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3bc94902-4625-4198-a28e-a52f51888eea-operator-scripts\") pod \"mysqld-exporter-9010-account-create-8r524\" (UID: \"3bc94902-4625-4198-a28e-a52f51888eea\") " pod="openstack/mysqld-exporter-9010-account-create-8r524" Nov 25 10:05:31 crc kubenswrapper[4769]: I1125 10:05:31.232771 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2r4zc\" (UniqueName: \"kubernetes.io/projected/3bc94902-4625-4198-a28e-a52f51888eea-kube-api-access-2r4zc\") pod \"mysqld-exporter-9010-account-create-8r524\" (UID: \"3bc94902-4625-4198-a28e-a52f51888eea\") " pod="openstack/mysqld-exporter-9010-account-create-8r524" Nov 25 10:05:31 crc kubenswrapper[4769]: I1125 10:05:31.294385 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-9010-account-create-8r524" Nov 25 10:05:31 crc kubenswrapper[4769]: I1125 10:05:31.308338 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/28837799-87a3-4a15-aaeb-91c949f21ef5-operator-scripts\") pod \"28837799-87a3-4a15-aaeb-91c949f21ef5\" (UID: \"28837799-87a3-4a15-aaeb-91c949f21ef5\") " Nov 25 10:05:31 crc kubenswrapper[4769]: I1125 10:05:31.308585 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s2vw5\" (UniqueName: \"kubernetes.io/projected/28837799-87a3-4a15-aaeb-91c949f21ef5-kube-api-access-s2vw5\") pod \"28837799-87a3-4a15-aaeb-91c949f21ef5\" (UID: \"28837799-87a3-4a15-aaeb-91c949f21ef5\") " Nov 25 10:05:31 crc kubenswrapper[4769]: I1125 10:05:31.313168 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/28837799-87a3-4a15-aaeb-91c949f21ef5-kube-api-access-s2vw5" (OuterVolumeSpecName: "kube-api-access-s2vw5") pod "28837799-87a3-4a15-aaeb-91c949f21ef5" (UID: "28837799-87a3-4a15-aaeb-91c949f21ef5"). InnerVolumeSpecName "kube-api-access-s2vw5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:05:31 crc kubenswrapper[4769]: I1125 10:05:31.411602 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s2vw5\" (UniqueName: \"kubernetes.io/projected/28837799-87a3-4a15-aaeb-91c949f21ef5-kube-api-access-s2vw5\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:31 crc kubenswrapper[4769]: I1125 10:05:31.417473 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/28837799-87a3-4a15-aaeb-91c949f21ef5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "28837799-87a3-4a15-aaeb-91c949f21ef5" (UID: "28837799-87a3-4a15-aaeb-91c949f21ef5"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:05:31 crc kubenswrapper[4769]: I1125 10:05:31.515722 4769 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/28837799-87a3-4a15-aaeb-91c949f21ef5-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:31 crc kubenswrapper[4769]: I1125 10:05:31.527713 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-xxdkp"] Nov 25 10:05:31 crc kubenswrapper[4769]: I1125 10:05:31.601399 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-cjxql" event={"ID":"28837799-87a3-4a15-aaeb-91c949f21ef5","Type":"ContainerDied","Data":"9ffe9fa2c6fe4fc99a5ccd9500a68eb4f213a637eef17fda4ff56d714cc3acaf"} Nov 25 10:05:31 crc kubenswrapper[4769]: I1125 10:05:31.601438 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-cjxql" Nov 25 10:05:31 crc kubenswrapper[4769]: I1125 10:05:31.601456 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9ffe9fa2c6fe4fc99a5ccd9500a68eb4f213a637eef17fda4ff56d714cc3acaf" Nov 25 10:05:31 crc kubenswrapper[4769]: I1125 10:05:31.603610 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-cell1-db-create-xxdkp" event={"ID":"9ff8162a-1d7e-4415-b231-5d09857454cb","Type":"ContainerStarted","Data":"a649c23535fa14d70e88325952d2a0d2553e7130417a6118c812b304364533d6"} Nov 25 10:05:31 crc kubenswrapper[4769]: W1125 10:05:31.775125 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3bc94902_4625_4198_a28e_a52f51888eea.slice/crio-300578b7c53eed4e6fb655e8a7b29a346b40f579e512118a513eb2cd7181e784 WatchSource:0}: Error finding container 300578b7c53eed4e6fb655e8a7b29a346b40f579e512118a513eb2cd7181e784: Status 404 returned error can't find the container with id 300578b7c53eed4e6fb655e8a7b29a346b40f579e512118a513eb2cd7181e784 Nov 25 10:05:31 crc kubenswrapper[4769]: I1125 10:05:31.779204 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-9010-account-create-8r524"] Nov 25 10:05:32 crc kubenswrapper[4769]: I1125 10:05:32.021684 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-rlvtg" Nov 25 10:05:32 crc kubenswrapper[4769]: I1125 10:05:32.132842 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mbjfm\" (UniqueName: \"kubernetes.io/projected/783756da-1dfb-49b3-9472-d33844dffc95-kube-api-access-mbjfm\") pod \"783756da-1dfb-49b3-9472-d33844dffc95\" (UID: \"783756da-1dfb-49b3-9472-d33844dffc95\") " Nov 25 10:05:32 crc kubenswrapper[4769]: I1125 10:05:32.133259 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/783756da-1dfb-49b3-9472-d33844dffc95-operator-scripts\") pod \"783756da-1dfb-49b3-9472-d33844dffc95\" (UID: \"783756da-1dfb-49b3-9472-d33844dffc95\") " Nov 25 10:05:32 crc kubenswrapper[4769]: I1125 10:05:32.133949 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/783756da-1dfb-49b3-9472-d33844dffc95-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "783756da-1dfb-49b3-9472-d33844dffc95" (UID: "783756da-1dfb-49b3-9472-d33844dffc95"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:05:32 crc kubenswrapper[4769]: I1125 10:05:32.138982 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/783756da-1dfb-49b3-9472-d33844dffc95-kube-api-access-mbjfm" (OuterVolumeSpecName: "kube-api-access-mbjfm") pod "783756da-1dfb-49b3-9472-d33844dffc95" (UID: "783756da-1dfb-49b3-9472-d33844dffc95"). InnerVolumeSpecName "kube-api-access-mbjfm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:05:32 crc kubenswrapper[4769]: I1125 10:05:32.225379 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-24df-account-create-vj7tp" Nov 25 10:05:32 crc kubenswrapper[4769]: I1125 10:05:32.241348 4769 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/783756da-1dfb-49b3-9472-d33844dffc95-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:32 crc kubenswrapper[4769]: I1125 10:05:32.241384 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mbjfm\" (UniqueName: \"kubernetes.io/projected/783756da-1dfb-49b3-9472-d33844dffc95-kube-api-access-mbjfm\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:32 crc kubenswrapper[4769]: I1125 10:05:32.342691 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7d7f8889-8ac3-4fbd-83be-743fd53cea7a-operator-scripts\") pod \"7d7f8889-8ac3-4fbd-83be-743fd53cea7a\" (UID: \"7d7f8889-8ac3-4fbd-83be-743fd53cea7a\") " Nov 25 10:05:32 crc kubenswrapper[4769]: I1125 10:05:32.343038 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zr6cp\" (UniqueName: \"kubernetes.io/projected/7d7f8889-8ac3-4fbd-83be-743fd53cea7a-kube-api-access-zr6cp\") pod \"7d7f8889-8ac3-4fbd-83be-743fd53cea7a\" (UID: \"7d7f8889-8ac3-4fbd-83be-743fd53cea7a\") " Nov 25 10:05:32 crc kubenswrapper[4769]: I1125 10:05:32.345935 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7d7f8889-8ac3-4fbd-83be-743fd53cea7a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7d7f8889-8ac3-4fbd-83be-743fd53cea7a" (UID: "7d7f8889-8ac3-4fbd-83be-743fd53cea7a"). 
InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:05:32 crc kubenswrapper[4769]: I1125 10:05:32.358255 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7d7f8889-8ac3-4fbd-83be-743fd53cea7a-kube-api-access-zr6cp" (OuterVolumeSpecName: "kube-api-access-zr6cp") pod "7d7f8889-8ac3-4fbd-83be-743fd53cea7a" (UID: "7d7f8889-8ac3-4fbd-83be-743fd53cea7a"). InnerVolumeSpecName "kube-api-access-zr6cp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:05:32 crc kubenswrapper[4769]: I1125 10:05:32.446820 4769 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7d7f8889-8ac3-4fbd-83be-743fd53cea7a-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:32 crc kubenswrapper[4769]: I1125 10:05:32.446867 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zr6cp\" (UniqueName: \"kubernetes.io/projected/7d7f8889-8ac3-4fbd-83be-743fd53cea7a-kube-api-access-zr6cp\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:32 crc kubenswrapper[4769]: I1125 10:05:32.615859 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-cell1-db-create-xxdkp" event={"ID":"9ff8162a-1d7e-4415-b231-5d09857454cb","Type":"ContainerStarted","Data":"a30a35cfbddd31cb6341859fa87f334fc55443a6ca8812377401ce333e8e0c90"} Nov 25 10:05:32 crc kubenswrapper[4769]: I1125 10:05:32.617617 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-9010-account-create-8r524" event={"ID":"3bc94902-4625-4198-a28e-a52f51888eea","Type":"ContainerStarted","Data":"e79bcdec7e358a37742ab3f695d3004794a348553ffcd822a206a99efaa99371"} Nov 25 10:05:32 crc kubenswrapper[4769]: I1125 10:05:32.617644 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-9010-account-create-8r524" event={"ID":"3bc94902-4625-4198-a28e-a52f51888eea","Type":"ContainerStarted","Data":"300578b7c53eed4e6fb655e8a7b29a346b40f579e512118a513eb2cd7181e784"} Nov 25 10:05:32 crc kubenswrapper[4769]: I1125 10:05:32.619275 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-rlvtg" event={"ID":"783756da-1dfb-49b3-9472-d33844dffc95","Type":"ContainerDied","Data":"750ce580ec9b478f3b6f8332bbacb8fd5571aafcd92e0192a1ac7eec21eee6d4"} Nov 25 10:05:32 crc kubenswrapper[4769]: I1125 10:05:32.619302 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="750ce580ec9b478f3b6f8332bbacb8fd5571aafcd92e0192a1ac7eec21eee6d4" Nov 25 10:05:32 crc kubenswrapper[4769]: I1125 10:05:32.619354 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-rlvtg" Nov 25 10:05:32 crc kubenswrapper[4769]: I1125 10:05:32.622231 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-24df-account-create-vj7tp" event={"ID":"7d7f8889-8ac3-4fbd-83be-743fd53cea7a","Type":"ContainerDied","Data":"7873b6597ead7f68ba26af8ce498b049e36d8665fd2abb327d9fdc68714d3b28"} Nov 25 10:05:32 crc kubenswrapper[4769]: I1125 10:05:32.622343 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7873b6597ead7f68ba26af8ce498b049e36d8665fd2abb327d9fdc68714d3b28" Nov 25 10:05:32 crc kubenswrapper[4769]: I1125 10:05:32.622423 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-24df-account-create-vj7tp" Nov 25 10:05:32 crc kubenswrapper[4769]: I1125 10:05:32.649838 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mysqld-exporter-openstack-cell1-db-create-xxdkp" podStartSLOduration=2.649812734 podStartE2EDuration="2.649812734s" podCreationTimestamp="2025-11-25 10:05:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:05:32.645301827 +0000 UTC m=+1281.230274150" watchObservedRunningTime="2025-11-25 10:05:32.649812734 +0000 UTC m=+1281.234785047" Nov 25 10:05:32 crc kubenswrapper[4769]: I1125 10:05:32.679477 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mysqld-exporter-9010-account-create-8r524" podStartSLOduration=2.679451485 podStartE2EDuration="2.679451485s" podCreationTimestamp="2025-11-25 10:05:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:05:32.673143841 +0000 UTC m=+1281.258116164" watchObservedRunningTime="2025-11-25 10:05:32.679451485 +0000 UTC m=+1281.264423798" Nov 25 10:05:32 crc kubenswrapper[4769]: I1125 10:05:32.913925 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Nov 25 10:05:33 crc kubenswrapper[4769]: I1125 10:05:33.640787 4769 generic.go:334] "Generic (PLEG): container finished" podID="9ff8162a-1d7e-4415-b231-5d09857454cb" containerID="a30a35cfbddd31cb6341859fa87f334fc55443a6ca8812377401ce333e8e0c90" exitCode=0 Nov 25 10:05:33 crc kubenswrapper[4769]: I1125 10:05:33.640894 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-cell1-db-create-xxdkp" event={"ID":"9ff8162a-1d7e-4415-b231-5d09857454cb","Type":"ContainerDied","Data":"a30a35cfbddd31cb6341859fa87f334fc55443a6ca8812377401ce333e8e0c90"} Nov 25 10:05:33 crc kubenswrapper[4769]: I1125 10:05:33.644426 4769 generic.go:334] "Generic (PLEG): container finished" podID="3bc94902-4625-4198-a28e-a52f51888eea" containerID="e79bcdec7e358a37742ab3f695d3004794a348553ffcd822a206a99efaa99371" exitCode=0 Nov 25 10:05:33 crc kubenswrapper[4769]: I1125 10:05:33.644461 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-9010-account-create-8r524" event={"ID":"3bc94902-4625-4198-a28e-a52f51888eea","Type":"ContainerDied","Data":"e79bcdec7e358a37742ab3f695d3004794a348553ffcd822a206a99efaa99371"} Nov 25 10:05:33 crc kubenswrapper[4769]: I1125 10:05:33.821555 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/f7987cb4-5485-438f-bc01-c69e509b81a6-etc-swift\") pod \"swift-storage-0\" (UID: \"f7987cb4-5485-438f-bc01-c69e509b81a6\") " pod="openstack/swift-storage-0" Nov 25 10:05:33 crc kubenswrapper[4769]: I1125 10:05:33.841610 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/f7987cb4-5485-438f-bc01-c69e509b81a6-etc-swift\") pod \"swift-storage-0\" (UID: \"f7987cb4-5485-438f-bc01-c69e509b81a6\") " pod="openstack/swift-storage-0" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.014692 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-jm2np"] Nov 25 10:05:34 crc kubenswrapper[4769]: E1125 10:05:34.015366 4769 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="7d7f8889-8ac3-4fbd-83be-743fd53cea7a" containerName="mariadb-account-create" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.015384 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d7f8889-8ac3-4fbd-83be-743fd53cea7a" containerName="mariadb-account-create" Nov 25 10:05:34 crc kubenswrapper[4769]: E1125 10:05:34.016051 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="783756da-1dfb-49b3-9472-d33844dffc95" containerName="mariadb-database-create" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.016070 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="783756da-1dfb-49b3-9472-d33844dffc95" containerName="mariadb-database-create" Nov 25 10:05:34 crc kubenswrapper[4769]: E1125 10:05:34.016087 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28837799-87a3-4a15-aaeb-91c949f21ef5" containerName="mariadb-database-create" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.016096 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="28837799-87a3-4a15-aaeb-91c949f21ef5" containerName="mariadb-database-create" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.016414 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="783756da-1dfb-49b3-9472-d33844dffc95" containerName="mariadb-database-create" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.016430 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="28837799-87a3-4a15-aaeb-91c949f21ef5" containerName="mariadb-database-create" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.016456 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d7f8889-8ac3-4fbd-83be-743fd53cea7a" containerName="mariadb-account-create" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.018217 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-jm2np" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.022433 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.022611 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-45xsn" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.025714 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.033926 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-jm2np"] Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.128169 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/28b97e59-606d-4810-bec5-cecbc1c691bc-config-data\") pod \"glance-db-sync-jm2np\" (UID: \"28b97e59-606d-4810-bec5-cecbc1c691bc\") " pod="openstack/glance-db-sync-jm2np" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.128264 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kmdnj\" (UniqueName: \"kubernetes.io/projected/28b97e59-606d-4810-bec5-cecbc1c691bc-kube-api-access-kmdnj\") pod \"glance-db-sync-jm2np\" (UID: \"28b97e59-606d-4810-bec5-cecbc1c691bc\") " pod="openstack/glance-db-sync-jm2np" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.128302 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/28b97e59-606d-4810-bec5-cecbc1c691bc-db-sync-config-data\") pod \"glance-db-sync-jm2np\" (UID: \"28b97e59-606d-4810-bec5-cecbc1c691bc\") " pod="openstack/glance-db-sync-jm2np" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.128392 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28b97e59-606d-4810-bec5-cecbc1c691bc-combined-ca-bundle\") pod \"glance-db-sync-jm2np\" (UID: \"28b97e59-606d-4810-bec5-cecbc1c691bc\") " pod="openstack/glance-db-sync-jm2np" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.231173 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/28b97e59-606d-4810-bec5-cecbc1c691bc-config-data\") pod \"glance-db-sync-jm2np\" (UID: \"28b97e59-606d-4810-bec5-cecbc1c691bc\") " pod="openstack/glance-db-sync-jm2np" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.231329 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kmdnj\" (UniqueName: \"kubernetes.io/projected/28b97e59-606d-4810-bec5-cecbc1c691bc-kube-api-access-kmdnj\") pod \"glance-db-sync-jm2np\" (UID: \"28b97e59-606d-4810-bec5-cecbc1c691bc\") " pod="openstack/glance-db-sync-jm2np" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.231375 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/28b97e59-606d-4810-bec5-cecbc1c691bc-db-sync-config-data\") pod \"glance-db-sync-jm2np\" (UID: \"28b97e59-606d-4810-bec5-cecbc1c691bc\") " pod="openstack/glance-db-sync-jm2np" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.231401 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28b97e59-606d-4810-bec5-cecbc1c691bc-combined-ca-bundle\") pod \"glance-db-sync-jm2np\" (UID: \"28b97e59-606d-4810-bec5-cecbc1c691bc\") " pod="openstack/glance-db-sync-jm2np" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.241267 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-tnz6t" podUID="a94c59b5-e672-4d10-a090-89fa82afc1f3" containerName="ovn-controller" 
probeResult="failure" output=< Nov 25 10:05:34 crc kubenswrapper[4769]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 25 10:05:34 crc kubenswrapper[4769]: > Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.241775 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/28b97e59-606d-4810-bec5-cecbc1c691bc-db-sync-config-data\") pod \"glance-db-sync-jm2np\" (UID: \"28b97e59-606d-4810-bec5-cecbc1c691bc\") " pod="openstack/glance-db-sync-jm2np" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.241869 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/28b97e59-606d-4810-bec5-cecbc1c691bc-config-data\") pod \"glance-db-sync-jm2np\" (UID: \"28b97e59-606d-4810-bec5-cecbc1c691bc\") " pod="openstack/glance-db-sync-jm2np" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.241985 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28b97e59-606d-4810-bec5-cecbc1c691bc-combined-ca-bundle\") pod \"glance-db-sync-jm2np\" (UID: \"28b97e59-606d-4810-bec5-cecbc1c691bc\") " pod="openstack/glance-db-sync-jm2np" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.257221 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-2qznl" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.257536 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kmdnj\" (UniqueName: \"kubernetes.io/projected/28b97e59-606d-4810-bec5-cecbc1c691bc-kube-api-access-kmdnj\") pod \"glance-db-sync-jm2np\" (UID: \"28b97e59-606d-4810-bec5-cecbc1c691bc\") " pod="openstack/glance-db-sync-jm2np" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.260318 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-2qznl" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.356083 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-jm2np" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.486985 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-tnz6t-config-8qh9g"] Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.489758 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-tnz6t-config-8qh9g" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.505486 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.505594 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-tnz6t-config-8qh9g"] Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.642381 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5b7f14f6-2e75-4bf4-9c6c-3e3599047d02-var-run-ovn\") pod \"ovn-controller-tnz6t-config-8qh9g\" (UID: \"5b7f14f6-2e75-4bf4-9c6c-3e3599047d02\") " pod="openstack/ovn-controller-tnz6t-config-8qh9g" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.642429 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5b7f14f6-2e75-4bf4-9c6c-3e3599047d02-var-log-ovn\") pod \"ovn-controller-tnz6t-config-8qh9g\" (UID: \"5b7f14f6-2e75-4bf4-9c6c-3e3599047d02\") " pod="openstack/ovn-controller-tnz6t-config-8qh9g" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.642466 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5b7f14f6-2e75-4bf4-9c6c-3e3599047d02-var-run\") pod \"ovn-controller-tnz6t-config-8qh9g\" (UID: \"5b7f14f6-2e75-4bf4-9c6c-3e3599047d02\") " pod="openstack/ovn-controller-tnz6t-config-8qh9g" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.642512 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5b7f14f6-2e75-4bf4-9c6c-3e3599047d02-scripts\") pod \"ovn-controller-tnz6t-config-8qh9g\" (UID: \"5b7f14f6-2e75-4bf4-9c6c-3e3599047d02\") " pod="openstack/ovn-controller-tnz6t-config-8qh9g" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.642799 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/5b7f14f6-2e75-4bf4-9c6c-3e3599047d02-additional-scripts\") pod \"ovn-controller-tnz6t-config-8qh9g\" (UID: \"5b7f14f6-2e75-4bf4-9c6c-3e3599047d02\") " pod="openstack/ovn-controller-tnz6t-config-8qh9g" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.643101 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kn2wz\" (UniqueName: \"kubernetes.io/projected/5b7f14f6-2e75-4bf4-9c6c-3e3599047d02-kube-api-access-kn2wz\") pod \"ovn-controller-tnz6t-config-8qh9g\" (UID: \"5b7f14f6-2e75-4bf4-9c6c-3e3599047d02\") " pod="openstack/ovn-controller-tnz6t-config-8qh9g" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.745624 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5b7f14f6-2e75-4bf4-9c6c-3e3599047d02-var-run\") pod \"ovn-controller-tnz6t-config-8qh9g\" (UID: \"5b7f14f6-2e75-4bf4-9c6c-3e3599047d02\") " pod="openstack/ovn-controller-tnz6t-config-8qh9g" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.745979 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5b7f14f6-2e75-4bf4-9c6c-3e3599047d02-scripts\") pod 
\"ovn-controller-tnz6t-config-8qh9g\" (UID: \"5b7f14f6-2e75-4bf4-9c6c-3e3599047d02\") " pod="openstack/ovn-controller-tnz6t-config-8qh9g" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.746015 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/5b7f14f6-2e75-4bf4-9c6c-3e3599047d02-additional-scripts\") pod \"ovn-controller-tnz6t-config-8qh9g\" (UID: \"5b7f14f6-2e75-4bf4-9c6c-3e3599047d02\") " pod="openstack/ovn-controller-tnz6t-config-8qh9g" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.746081 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kn2wz\" (UniqueName: \"kubernetes.io/projected/5b7f14f6-2e75-4bf4-9c6c-3e3599047d02-kube-api-access-kn2wz\") pod \"ovn-controller-tnz6t-config-8qh9g\" (UID: \"5b7f14f6-2e75-4bf4-9c6c-3e3599047d02\") " pod="openstack/ovn-controller-tnz6t-config-8qh9g" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.746164 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5b7f14f6-2e75-4bf4-9c6c-3e3599047d02-var-run-ovn\") pod \"ovn-controller-tnz6t-config-8qh9g\" (UID: \"5b7f14f6-2e75-4bf4-9c6c-3e3599047d02\") " pod="openstack/ovn-controller-tnz6t-config-8qh9g" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.746182 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5b7f14f6-2e75-4bf4-9c6c-3e3599047d02-var-log-ovn\") pod \"ovn-controller-tnz6t-config-8qh9g\" (UID: \"5b7f14f6-2e75-4bf4-9c6c-3e3599047d02\") " pod="openstack/ovn-controller-tnz6t-config-8qh9g" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.746448 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5b7f14f6-2e75-4bf4-9c6c-3e3599047d02-var-log-ovn\") pod \"ovn-controller-tnz6t-config-8qh9g\" (UID: \"5b7f14f6-2e75-4bf4-9c6c-3e3599047d02\") " pod="openstack/ovn-controller-tnz6t-config-8qh9g" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.746505 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5b7f14f6-2e75-4bf4-9c6c-3e3599047d02-var-run\") pod \"ovn-controller-tnz6t-config-8qh9g\" (UID: \"5b7f14f6-2e75-4bf4-9c6c-3e3599047d02\") " pod="openstack/ovn-controller-tnz6t-config-8qh9g" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.748413 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5b7f14f6-2e75-4bf4-9c6c-3e3599047d02-scripts\") pod \"ovn-controller-tnz6t-config-8qh9g\" (UID: \"5b7f14f6-2e75-4bf4-9c6c-3e3599047d02\") " pod="openstack/ovn-controller-tnz6t-config-8qh9g" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.748824 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/5b7f14f6-2e75-4bf4-9c6c-3e3599047d02-additional-scripts\") pod \"ovn-controller-tnz6t-config-8qh9g\" (UID: \"5b7f14f6-2e75-4bf4-9c6c-3e3599047d02\") " pod="openstack/ovn-controller-tnz6t-config-8qh9g" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.749176 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5b7f14f6-2e75-4bf4-9c6c-3e3599047d02-var-run-ovn\") pod 
\"ovn-controller-tnz6t-config-8qh9g\" (UID: \"5b7f14f6-2e75-4bf4-9c6c-3e3599047d02\") " pod="openstack/ovn-controller-tnz6t-config-8qh9g" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.776710 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kn2wz\" (UniqueName: \"kubernetes.io/projected/5b7f14f6-2e75-4bf4-9c6c-3e3599047d02-kube-api-access-kn2wz\") pod \"ovn-controller-tnz6t-config-8qh9g\" (UID: \"5b7f14f6-2e75-4bf4-9c6c-3e3599047d02\") " pod="openstack/ovn-controller-tnz6t-config-8qh9g" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.819679 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 25 10:05:34 crc kubenswrapper[4769]: I1125 10:05:34.830106 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-tnz6t-config-8qh9g" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.148281 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.282498 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-q5w5g"] Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.283878 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-q5w5g" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.310155 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-498c-account-create-4dtkm"] Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.311685 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-498c-account-create-4dtkm" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.313733 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.338699 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-q5w5g"] Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.365025 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-498c-account-create-4dtkm"] Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.373046 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bn5l6\" (UniqueName: \"kubernetes.io/projected/8c0172ef-616f-4636-b51f-4e534a850822-kube-api-access-bn5l6\") pod \"cinder-db-create-q5w5g\" (UID: \"8c0172ef-616f-4636-b51f-4e534a850822\") " pod="openstack/cinder-db-create-q5w5g" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.373128 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8c0172ef-616f-4636-b51f-4e534a850822-operator-scripts\") pod \"cinder-db-create-q5w5g\" (UID: \"8c0172ef-616f-4636-b51f-4e534a850822\") " pod="openstack/cinder-db-create-q5w5g" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.432038 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-44vlp"] Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.433677 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-44vlp" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.446053 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-dea0-account-create-8ns6l"] Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.448494 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-dea0-account-create-8ns6l" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.458973 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-dea0-account-create-8ns6l"] Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.470873 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.474502 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-44vlp"] Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.482386 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-njmcg\" (UniqueName: \"kubernetes.io/projected/c5155b12-87cf-4695-a19c-d54322926320-kube-api-access-njmcg\") pod \"barbican-498c-account-create-4dtkm\" (UID: \"c5155b12-87cf-4695-a19c-d54322926320\") " pod="openstack/barbican-498c-account-create-4dtkm" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.482738 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bn5l6\" (UniqueName: \"kubernetes.io/projected/8c0172ef-616f-4636-b51f-4e534a850822-kube-api-access-bn5l6\") pod \"cinder-db-create-q5w5g\" (UID: \"8c0172ef-616f-4636-b51f-4e534a850822\") " pod="openstack/cinder-db-create-q5w5g" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.482832 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8c0172ef-616f-4636-b51f-4e534a850822-operator-scripts\") pod \"cinder-db-create-q5w5g\" (UID: \"8c0172ef-616f-4636-b51f-4e534a850822\") " pod="openstack/cinder-db-create-q5w5g" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.482891 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c5155b12-87cf-4695-a19c-d54322926320-operator-scripts\") pod \"barbican-498c-account-create-4dtkm\" (UID: \"c5155b12-87cf-4695-a19c-d54322926320\") " pod="openstack/barbican-498c-account-create-4dtkm" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.484445 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8c0172ef-616f-4636-b51f-4e534a850822-operator-scripts\") pod \"cinder-db-create-q5w5g\" (UID: \"8c0172ef-616f-4636-b51f-4e534a850822\") " pod="openstack/cinder-db-create-q5w5g" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.504871 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-kt2fl"] Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.506375 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-kt2fl" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.539864 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.540213 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-bnc4t" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.546527 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.547587 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.568180 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bn5l6\" (UniqueName: \"kubernetes.io/projected/8c0172ef-616f-4636-b51f-4e534a850822-kube-api-access-bn5l6\") pod \"cinder-db-create-q5w5g\" (UID: \"8c0172ef-616f-4636-b51f-4e534a850822\") " pod="openstack/cinder-db-create-q5w5g" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.599751 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-kt2fl"] Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.601669 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5719295-a2f1-4d2f-a021-1da5411acbc8-config-data\") pod \"keystone-db-sync-kt2fl\" (UID: \"e5719295-a2f1-4d2f-a021-1da5411acbc8\") " pod="openstack/keystone-db-sync-kt2fl" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.601774 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n28ft\" (UniqueName: \"kubernetes.io/projected/11fbb390-f40d-4098-adab-b13e53b51cc8-kube-api-access-n28ft\") pod \"cinder-dea0-account-create-8ns6l\" (UID: \"11fbb390-f40d-4098-adab-b13e53b51cc8\") " pod="openstack/cinder-dea0-account-create-8ns6l" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.601999 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5719295-a2f1-4d2f-a021-1da5411acbc8-combined-ca-bundle\") pod \"keystone-db-sync-kt2fl\" (UID: \"e5719295-a2f1-4d2f-a021-1da5411acbc8\") " pod="openstack/keystone-db-sync-kt2fl" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.602069 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-njmcg\" (UniqueName: \"kubernetes.io/projected/c5155b12-87cf-4695-a19c-d54322926320-kube-api-access-njmcg\") pod \"barbican-498c-account-create-4dtkm\" (UID: \"c5155b12-87cf-4695-a19c-d54322926320\") " pod="openstack/barbican-498c-account-create-4dtkm" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.602100 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/25f32f1a-8af2-4632-8a1e-689576d2d17b-operator-scripts\") pod \"barbican-db-create-44vlp\" (UID: \"25f32f1a-8af2-4632-8a1e-689576d2d17b\") " pod="openstack/barbican-db-create-44vlp" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.602199 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hcdfk\" (UniqueName: 
\"kubernetes.io/projected/e5719295-a2f1-4d2f-a021-1da5411acbc8-kube-api-access-hcdfk\") pod \"keystone-db-sync-kt2fl\" (UID: \"e5719295-a2f1-4d2f-a021-1da5411acbc8\") " pod="openstack/keystone-db-sync-kt2fl" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.602238 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z8s4t\" (UniqueName: \"kubernetes.io/projected/25f32f1a-8af2-4632-8a1e-689576d2d17b-kube-api-access-z8s4t\") pod \"barbican-db-create-44vlp\" (UID: \"25f32f1a-8af2-4632-8a1e-689576d2d17b\") " pod="openstack/barbican-db-create-44vlp" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.602323 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11fbb390-f40d-4098-adab-b13e53b51cc8-operator-scripts\") pod \"cinder-dea0-account-create-8ns6l\" (UID: \"11fbb390-f40d-4098-adab-b13e53b51cc8\") " pod="openstack/cinder-dea0-account-create-8ns6l" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.602712 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c5155b12-87cf-4695-a19c-d54322926320-operator-scripts\") pod \"barbican-498c-account-create-4dtkm\" (UID: \"c5155b12-87cf-4695-a19c-d54322926320\") " pod="openstack/barbican-498c-account-create-4dtkm" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.604662 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c5155b12-87cf-4695-a19c-d54322926320-operator-scripts\") pod \"barbican-498c-account-create-4dtkm\" (UID: \"c5155b12-87cf-4695-a19c-d54322926320\") " pod="openstack/barbican-498c-account-create-4dtkm" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.622595 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-create-kd9nw"] Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.623778 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-njmcg\" (UniqueName: \"kubernetes.io/projected/c5155b12-87cf-4695-a19c-d54322926320-kube-api-access-njmcg\") pod \"barbican-498c-account-create-4dtkm\" (UID: \"c5155b12-87cf-4695-a19c-d54322926320\") " pod="openstack/barbican-498c-account-create-4dtkm" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.624773 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-kd9nw" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.639092 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-e28d-account-create-w5wr6"] Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.641666 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-e28d-account-create-w5wr6" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.642244 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-q5w5g" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.645941 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-db-secret" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.661904 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-498c-account-create-4dtkm" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.664604 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-kd9nw"] Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.682126 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-e28d-account-create-w5wr6"] Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.705726 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5719295-a2f1-4d2f-a021-1da5411acbc8-config-data\") pod \"keystone-db-sync-kt2fl\" (UID: \"e5719295-a2f1-4d2f-a021-1da5411acbc8\") " pod="openstack/keystone-db-sync-kt2fl" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.705814 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d2550677-acee-408a-86c9-149a068d10a1-operator-scripts\") pod \"heat-e28d-account-create-w5wr6\" (UID: \"d2550677-acee-408a-86c9-149a068d10a1\") " pod="openstack/heat-e28d-account-create-w5wr6" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.705848 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n28ft\" (UniqueName: \"kubernetes.io/projected/11fbb390-f40d-4098-adab-b13e53b51cc8-kube-api-access-n28ft\") pod \"cinder-dea0-account-create-8ns6l\" (UID: \"11fbb390-f40d-4098-adab-b13e53b51cc8\") " pod="openstack/cinder-dea0-account-create-8ns6l" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.705889 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/192781af-b5a8-4804-b3a9-a53290328c8c-operator-scripts\") pod \"heat-db-create-kd9nw\" (UID: \"192781af-b5a8-4804-b3a9-a53290328c8c\") " pod="openstack/heat-db-create-kd9nw" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.705929 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5719295-a2f1-4d2f-a021-1da5411acbc8-combined-ca-bundle\") pod \"keystone-db-sync-kt2fl\" (UID: \"e5719295-a2f1-4d2f-a021-1da5411acbc8\") " pod="openstack/keystone-db-sync-kt2fl" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.705952 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nclb9\" (UniqueName: \"kubernetes.io/projected/d2550677-acee-408a-86c9-149a068d10a1-kube-api-access-nclb9\") pod \"heat-e28d-account-create-w5wr6\" (UID: \"d2550677-acee-408a-86c9-149a068d10a1\") " pod="openstack/heat-e28d-account-create-w5wr6" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.706124 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/25f32f1a-8af2-4632-8a1e-689576d2d17b-operator-scripts\") pod \"barbican-db-create-44vlp\" (UID: \"25f32f1a-8af2-4632-8a1e-689576d2d17b\") " pod="openstack/barbican-db-create-44vlp" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.706289 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hcdfk\" (UniqueName: \"kubernetes.io/projected/e5719295-a2f1-4d2f-a021-1da5411acbc8-kube-api-access-hcdfk\") pod \"keystone-db-sync-kt2fl\" (UID: \"e5719295-a2f1-4d2f-a021-1da5411acbc8\") " 
pod="openstack/keystone-db-sync-kt2fl" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.706343 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z8s4t\" (UniqueName: \"kubernetes.io/projected/25f32f1a-8af2-4632-8a1e-689576d2d17b-kube-api-access-z8s4t\") pod \"barbican-db-create-44vlp\" (UID: \"25f32f1a-8af2-4632-8a1e-689576d2d17b\") " pod="openstack/barbican-db-create-44vlp" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.706429 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11fbb390-f40d-4098-adab-b13e53b51cc8-operator-scripts\") pod \"cinder-dea0-account-create-8ns6l\" (UID: \"11fbb390-f40d-4098-adab-b13e53b51cc8\") " pod="openstack/cinder-dea0-account-create-8ns6l" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.706516 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xsr84\" (UniqueName: \"kubernetes.io/projected/192781af-b5a8-4804-b3a9-a53290328c8c-kube-api-access-xsr84\") pod \"heat-db-create-kd9nw\" (UID: \"192781af-b5a8-4804-b3a9-a53290328c8c\") " pod="openstack/heat-db-create-kd9nw" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.706948 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/25f32f1a-8af2-4632-8a1e-689576d2d17b-operator-scripts\") pod \"barbican-db-create-44vlp\" (UID: \"25f32f1a-8af2-4632-8a1e-689576d2d17b\") " pod="openstack/barbican-db-create-44vlp" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.707401 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11fbb390-f40d-4098-adab-b13e53b51cc8-operator-scripts\") pod \"cinder-dea0-account-create-8ns6l\" (UID: \"11fbb390-f40d-4098-adab-b13e53b51cc8\") " pod="openstack/cinder-dea0-account-create-8ns6l" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.709946 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5719295-a2f1-4d2f-a021-1da5411acbc8-combined-ca-bundle\") pod \"keystone-db-sync-kt2fl\" (UID: \"e5719295-a2f1-4d2f-a021-1da5411acbc8\") " pod="openstack/keystone-db-sync-kt2fl" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.722783 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5719295-a2f1-4d2f-a021-1da5411acbc8-config-data\") pod \"keystone-db-sync-kt2fl\" (UID: \"e5719295-a2f1-4d2f-a021-1da5411acbc8\") " pod="openstack/keystone-db-sync-kt2fl" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.723150 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n28ft\" (UniqueName: \"kubernetes.io/projected/11fbb390-f40d-4098-adab-b13e53b51cc8-kube-api-access-n28ft\") pod \"cinder-dea0-account-create-8ns6l\" (UID: \"11fbb390-f40d-4098-adab-b13e53b51cc8\") " pod="openstack/cinder-dea0-account-create-8ns6l" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.724414 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z8s4t\" (UniqueName: \"kubernetes.io/projected/25f32f1a-8af2-4632-8a1e-689576d2d17b-kube-api-access-z8s4t\") pod \"barbican-db-create-44vlp\" (UID: \"25f32f1a-8af2-4632-8a1e-689576d2d17b\") " pod="openstack/barbican-db-create-44vlp" Nov 25 10:05:35 crc 
kubenswrapper[4769]: I1125 10:05:35.726310 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hcdfk\" (UniqueName: \"kubernetes.io/projected/e5719295-a2f1-4d2f-a021-1da5411acbc8-kube-api-access-hcdfk\") pod \"keystone-db-sync-kt2fl\" (UID: \"e5719295-a2f1-4d2f-a021-1da5411acbc8\") " pod="openstack/keystone-db-sync-kt2fl" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.763797 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-44vlp" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.784672 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-dea0-account-create-8ns6l" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.796003 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-8r5jb"] Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.807435 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-8r5jb" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.811574 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xsr84\" (UniqueName: \"kubernetes.io/projected/192781af-b5a8-4804-b3a9-a53290328c8c-kube-api-access-xsr84\") pod \"heat-db-create-kd9nw\" (UID: \"192781af-b5a8-4804-b3a9-a53290328c8c\") " pod="openstack/heat-db-create-kd9nw" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.811825 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d2550677-acee-408a-86c9-149a068d10a1-operator-scripts\") pod \"heat-e28d-account-create-w5wr6\" (UID: \"d2550677-acee-408a-86c9-149a068d10a1\") " pod="openstack/heat-e28d-account-create-w5wr6" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.811916 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/192781af-b5a8-4804-b3a9-a53290328c8c-operator-scripts\") pod \"heat-db-create-kd9nw\" (UID: \"192781af-b5a8-4804-b3a9-a53290328c8c\") " pod="openstack/heat-db-create-kd9nw" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.812037 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nclb9\" (UniqueName: \"kubernetes.io/projected/d2550677-acee-408a-86c9-149a068d10a1-kube-api-access-nclb9\") pod \"heat-e28d-account-create-w5wr6\" (UID: \"d2550677-acee-408a-86c9-149a068d10a1\") " pod="openstack/heat-e28d-account-create-w5wr6" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.813262 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d2550677-acee-408a-86c9-149a068d10a1-operator-scripts\") pod \"heat-e28d-account-create-w5wr6\" (UID: \"d2550677-acee-408a-86c9-149a068d10a1\") " pod="openstack/heat-e28d-account-create-w5wr6" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.814232 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/192781af-b5a8-4804-b3a9-a53290328c8c-operator-scripts\") pod \"heat-db-create-kd9nw\" (UID: \"192781af-b5a8-4804-b3a9-a53290328c8c\") " pod="openstack/heat-db-create-kd9nw" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.820164 4769 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/neutron-1a9a-account-create-v2rj6"] Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.821599 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-1a9a-account-create-v2rj6" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.829300 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.842184 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nclb9\" (UniqueName: \"kubernetes.io/projected/d2550677-acee-408a-86c9-149a068d10a1-kube-api-access-nclb9\") pod \"heat-e28d-account-create-w5wr6\" (UID: \"d2550677-acee-408a-86c9-149a068d10a1\") " pod="openstack/heat-e28d-account-create-w5wr6" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.843803 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-1a9a-account-create-v2rj6"] Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.870492 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xsr84\" (UniqueName: \"kubernetes.io/projected/192781af-b5a8-4804-b3a9-a53290328c8c-kube-api-access-xsr84\") pod \"heat-db-create-kd9nw\" (UID: \"192781af-b5a8-4804-b3a9-a53290328c8c\") " pod="openstack/heat-db-create-kd9nw" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.872534 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-kt2fl" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.894767 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-8r5jb"] Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.914578 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a4223d39-07d7-421b-b3b7-953758e77444-operator-scripts\") pod \"neutron-db-create-8r5jb\" (UID: \"a4223d39-07d7-421b-b3b7-953758e77444\") " pod="openstack/neutron-db-create-8r5jb" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.914921 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27cb33b2-18c8-4d25-a441-4bdc64d8bfde-operator-scripts\") pod \"neutron-1a9a-account-create-v2rj6\" (UID: \"27cb33b2-18c8-4d25-a441-4bdc64d8bfde\") " pod="openstack/neutron-1a9a-account-create-v2rj6" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.915086 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jqphd\" (UniqueName: \"kubernetes.io/projected/27cb33b2-18c8-4d25-a441-4bdc64d8bfde-kube-api-access-jqphd\") pod \"neutron-1a9a-account-create-v2rj6\" (UID: \"27cb33b2-18c8-4d25-a441-4bdc64d8bfde\") " pod="openstack/neutron-1a9a-account-create-v2rj6" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.915119 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f4hpd\" (UniqueName: \"kubernetes.io/projected/a4223d39-07d7-421b-b3b7-953758e77444-kube-api-access-f4hpd\") pod \"neutron-db-create-8r5jb\" (UID: \"a4223d39-07d7-421b-b3b7-953758e77444\") " pod="openstack/neutron-db-create-8r5jb" Nov 25 10:05:35 crc kubenswrapper[4769]: I1125 10:05:35.995208 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-create-kd9nw" Nov 25 10:05:36 crc kubenswrapper[4769]: I1125 10:05:36.003002 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-e28d-account-create-w5wr6" Nov 25 10:05:36 crc kubenswrapper[4769]: I1125 10:05:36.016713 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27cb33b2-18c8-4d25-a441-4bdc64d8bfde-operator-scripts\") pod \"neutron-1a9a-account-create-v2rj6\" (UID: \"27cb33b2-18c8-4d25-a441-4bdc64d8bfde\") " pod="openstack/neutron-1a9a-account-create-v2rj6" Nov 25 10:05:36 crc kubenswrapper[4769]: I1125 10:05:36.016817 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jqphd\" (UniqueName: \"kubernetes.io/projected/27cb33b2-18c8-4d25-a441-4bdc64d8bfde-kube-api-access-jqphd\") pod \"neutron-1a9a-account-create-v2rj6\" (UID: \"27cb33b2-18c8-4d25-a441-4bdc64d8bfde\") " pod="openstack/neutron-1a9a-account-create-v2rj6" Nov 25 10:05:36 crc kubenswrapper[4769]: I1125 10:05:36.016860 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f4hpd\" (UniqueName: \"kubernetes.io/projected/a4223d39-07d7-421b-b3b7-953758e77444-kube-api-access-f4hpd\") pod \"neutron-db-create-8r5jb\" (UID: \"a4223d39-07d7-421b-b3b7-953758e77444\") " pod="openstack/neutron-db-create-8r5jb" Nov 25 10:05:36 crc kubenswrapper[4769]: I1125 10:05:36.016950 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a4223d39-07d7-421b-b3b7-953758e77444-operator-scripts\") pod \"neutron-db-create-8r5jb\" (UID: \"a4223d39-07d7-421b-b3b7-953758e77444\") " pod="openstack/neutron-db-create-8r5jb" Nov 25 10:05:36 crc kubenswrapper[4769]: I1125 10:05:36.017803 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a4223d39-07d7-421b-b3b7-953758e77444-operator-scripts\") pod \"neutron-db-create-8r5jb\" (UID: \"a4223d39-07d7-421b-b3b7-953758e77444\") " pod="openstack/neutron-db-create-8r5jb" Nov 25 10:05:36 crc kubenswrapper[4769]: I1125 10:05:36.018688 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27cb33b2-18c8-4d25-a441-4bdc64d8bfde-operator-scripts\") pod \"neutron-1a9a-account-create-v2rj6\" (UID: \"27cb33b2-18c8-4d25-a441-4bdc64d8bfde\") " pod="openstack/neutron-1a9a-account-create-v2rj6" Nov 25 10:05:36 crc kubenswrapper[4769]: I1125 10:05:36.038060 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f4hpd\" (UniqueName: \"kubernetes.io/projected/a4223d39-07d7-421b-b3b7-953758e77444-kube-api-access-f4hpd\") pod \"neutron-db-create-8r5jb\" (UID: \"a4223d39-07d7-421b-b3b7-953758e77444\") " pod="openstack/neutron-db-create-8r5jb" Nov 25 10:05:36 crc kubenswrapper[4769]: I1125 10:05:36.038217 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jqphd\" (UniqueName: \"kubernetes.io/projected/27cb33b2-18c8-4d25-a441-4bdc64d8bfde-kube-api-access-jqphd\") pod \"neutron-1a9a-account-create-v2rj6\" (UID: \"27cb33b2-18c8-4d25-a441-4bdc64d8bfde\") " pod="openstack/neutron-1a9a-account-create-v2rj6" Nov 25 10:05:36 crc kubenswrapper[4769]: I1125 10:05:36.209651 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-8r5jb" Nov 25 10:05:36 crc kubenswrapper[4769]: I1125 10:05:36.209932 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-1a9a-account-create-v2rj6" Nov 25 10:05:36 crc kubenswrapper[4769]: I1125 10:05:36.695572 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-cell1-db-create-xxdkp" event={"ID":"9ff8162a-1d7e-4415-b231-5d09857454cb","Type":"ContainerDied","Data":"a649c23535fa14d70e88325952d2a0d2553e7130417a6118c812b304364533d6"} Nov 25 10:05:36 crc kubenswrapper[4769]: I1125 10:05:36.696064 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a649c23535fa14d70e88325952d2a0d2553e7130417a6118c812b304364533d6" Nov 25 10:05:36 crc kubenswrapper[4769]: I1125 10:05:36.823562 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-xxdkp" Nov 25 10:05:36 crc kubenswrapper[4769]: I1125 10:05:36.831760 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-9010-account-create-8r524" Nov 25 10:05:36 crc kubenswrapper[4769]: I1125 10:05:36.939844 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2r4zc\" (UniqueName: \"kubernetes.io/projected/3bc94902-4625-4198-a28e-a52f51888eea-kube-api-access-2r4zc\") pod \"3bc94902-4625-4198-a28e-a52f51888eea\" (UID: \"3bc94902-4625-4198-a28e-a52f51888eea\") " Nov 25 10:05:36 crc kubenswrapper[4769]: I1125 10:05:36.939927 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6cd9f\" (UniqueName: \"kubernetes.io/projected/9ff8162a-1d7e-4415-b231-5d09857454cb-kube-api-access-6cd9f\") pod \"9ff8162a-1d7e-4415-b231-5d09857454cb\" (UID: \"9ff8162a-1d7e-4415-b231-5d09857454cb\") " Nov 25 10:05:36 crc kubenswrapper[4769]: I1125 10:05:36.939985 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9ff8162a-1d7e-4415-b231-5d09857454cb-operator-scripts\") pod \"9ff8162a-1d7e-4415-b231-5d09857454cb\" (UID: \"9ff8162a-1d7e-4415-b231-5d09857454cb\") " Nov 25 10:05:36 crc kubenswrapper[4769]: I1125 10:05:36.940166 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3bc94902-4625-4198-a28e-a52f51888eea-operator-scripts\") pod \"3bc94902-4625-4198-a28e-a52f51888eea\" (UID: \"3bc94902-4625-4198-a28e-a52f51888eea\") " Nov 25 10:05:36 crc kubenswrapper[4769]: I1125 10:05:36.941832 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3bc94902-4625-4198-a28e-a52f51888eea-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3bc94902-4625-4198-a28e-a52f51888eea" (UID: "3bc94902-4625-4198-a28e-a52f51888eea"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:05:36 crc kubenswrapper[4769]: I1125 10:05:36.944622 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ff8162a-1d7e-4415-b231-5d09857454cb-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9ff8162a-1d7e-4415-b231-5d09857454cb" (UID: "9ff8162a-1d7e-4415-b231-5d09857454cb"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:05:36 crc kubenswrapper[4769]: I1125 10:05:36.949217 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3bc94902-4625-4198-a28e-a52f51888eea-kube-api-access-2r4zc" (OuterVolumeSpecName: "kube-api-access-2r4zc") pod "3bc94902-4625-4198-a28e-a52f51888eea" (UID: "3bc94902-4625-4198-a28e-a52f51888eea"). InnerVolumeSpecName "kube-api-access-2r4zc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:05:36 crc kubenswrapper[4769]: I1125 10:05:36.954484 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9ff8162a-1d7e-4415-b231-5d09857454cb-kube-api-access-6cd9f" (OuterVolumeSpecName: "kube-api-access-6cd9f") pod "9ff8162a-1d7e-4415-b231-5d09857454cb" (UID: "9ff8162a-1d7e-4415-b231-5d09857454cb"). InnerVolumeSpecName "kube-api-access-6cd9f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:05:37 crc kubenswrapper[4769]: I1125 10:05:37.046643 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2r4zc\" (UniqueName: \"kubernetes.io/projected/3bc94902-4625-4198-a28e-a52f51888eea-kube-api-access-2r4zc\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:37 crc kubenswrapper[4769]: I1125 10:05:37.046678 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6cd9f\" (UniqueName: \"kubernetes.io/projected/9ff8162a-1d7e-4415-b231-5d09857454cb-kube-api-access-6cd9f\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:37 crc kubenswrapper[4769]: I1125 10:05:37.046690 4769 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9ff8162a-1d7e-4415-b231-5d09857454cb-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:37 crc kubenswrapper[4769]: I1125 10:05:37.046700 4769 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3bc94902-4625-4198-a28e-a52f51888eea-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:37 crc kubenswrapper[4769]: I1125 10:05:37.697277 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-q5w5g"] Nov 25 10:05:37 crc kubenswrapper[4769]: I1125 10:05:37.716847 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-9010-account-create-8r524" event={"ID":"3bc94902-4625-4198-a28e-a52f51888eea","Type":"ContainerDied","Data":"300578b7c53eed4e6fb655e8a7b29a346b40f579e512118a513eb2cd7181e784"} Nov 25 10:05:37 crc kubenswrapper[4769]: I1125 10:05:37.716897 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="300578b7c53eed4e6fb655e8a7b29a346b40f579e512118a513eb2cd7181e784" Nov 25 10:05:37 crc kubenswrapper[4769]: I1125 10:05:37.717081 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-9010-account-create-8r524" Nov 25 10:05:37 crc kubenswrapper[4769]: I1125 10:05:37.721258 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-q5w5g" event={"ID":"8c0172ef-616f-4636-b51f-4e534a850822","Type":"ContainerStarted","Data":"724dff3a93e1aa060c526777a2ae4e484478fb958089738a3fcaaf11a16d430a"} Nov 25 10:05:37 crc kubenswrapper[4769]: I1125 10:05:37.727078 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-xxdkp" Nov 25 10:05:37 crc kubenswrapper[4769]: I1125 10:05:37.727726 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7","Type":"ContainerStarted","Data":"fd15f4c6c259b869ec7062dcb9d286aec188d93849849584f7ec7ab2cedcd01d"} Nov 25 10:05:37 crc kubenswrapper[4769]: I1125 10:05:37.769637 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=16.786995907 podStartE2EDuration="1m7.76961757s" podCreationTimestamp="2025-11-25 10:04:30 +0000 UTC" firstStartedPulling="2025-11-25 10:04:45.996490099 +0000 UTC m=+1234.581462422" lastFinishedPulling="2025-11-25 10:05:36.979111772 +0000 UTC m=+1285.564084085" observedRunningTime="2025-11-25 10:05:37.760839832 +0000 UTC m=+1286.345812145" watchObservedRunningTime="2025-11-25 10:05:37.76961757 +0000 UTC m=+1286.354589883" Nov 25 10:05:38 crc kubenswrapper[4769]: I1125 10:05:38.209208 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-dea0-account-create-8ns6l"] Nov 25 10:05:38 crc kubenswrapper[4769]: W1125 10:05:38.255278 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc5155b12_87cf_4695_a19c_d54322926320.slice/crio-67a943a283eb4fea587ac4d39731052127e14cd83f9336db694ef8b83042a834 WatchSource:0}: Error finding container 67a943a283eb4fea587ac4d39731052127e14cd83f9336db694ef8b83042a834: Status 404 returned error can't find the container with id 67a943a283eb4fea587ac4d39731052127e14cd83f9336db694ef8b83042a834 Nov 25 10:05:38 crc kubenswrapper[4769]: I1125 10:05:38.265177 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-kd9nw"] Nov 25 10:05:38 crc kubenswrapper[4769]: I1125 10:05:38.265213 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-498c-account-create-4dtkm"] Nov 25 10:05:38 crc kubenswrapper[4769]: I1125 10:05:38.323747 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-44vlp"] Nov 25 10:05:38 crc kubenswrapper[4769]: I1125 10:05:38.341345 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-tnz6t-config-8qh9g"] Nov 25 10:05:38 crc kubenswrapper[4769]: I1125 10:05:38.427868 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 25 10:05:38 crc kubenswrapper[4769]: I1125 10:05:38.486872 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-jm2np"] Nov 25 10:05:38 crc kubenswrapper[4769]: I1125 10:05:38.625991 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-e28d-account-create-w5wr6"] Nov 25 10:05:38 crc kubenswrapper[4769]: I1125 10:05:38.652510 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-1a9a-account-create-v2rj6"] Nov 25 10:05:38 crc kubenswrapper[4769]: I1125 10:05:38.670932 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-kt2fl"] Nov 25 10:05:38 crc kubenswrapper[4769]: I1125 10:05:38.686006 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-8r5jb"] Nov 25 10:05:38 crc kubenswrapper[4769]: I1125 10:05:38.764420 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-kt2fl" 
event={"ID":"e5719295-a2f1-4d2f-a021-1da5411acbc8","Type":"ContainerStarted","Data":"ce4bd62f83937bd24de5863470058c9e11e0f89c573fb87b85306125a67c3ccf"} Nov 25 10:05:38 crc kubenswrapper[4769]: I1125 10:05:38.788450 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-498c-account-create-4dtkm" event={"ID":"c5155b12-87cf-4695-a19c-d54322926320","Type":"ContainerStarted","Data":"7c481239b8be7e86fff05999c53aecfeb57843ac547d565d8963b89952d03fe7"} Nov 25 10:05:38 crc kubenswrapper[4769]: I1125 10:05:38.788530 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-498c-account-create-4dtkm" event={"ID":"c5155b12-87cf-4695-a19c-d54322926320","Type":"ContainerStarted","Data":"67a943a283eb4fea587ac4d39731052127e14cd83f9336db694ef8b83042a834"} Nov 25 10:05:38 crc kubenswrapper[4769]: I1125 10:05:38.797525 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-8r5jb" event={"ID":"a4223d39-07d7-421b-b3b7-953758e77444","Type":"ContainerStarted","Data":"b2e86c6d6eb8a09cd7f236729edd324a0ad8eaf534f3d1b393e42ee43d16556d"} Nov 25 10:05:38 crc kubenswrapper[4769]: I1125 10:05:38.818238 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-jm2np" event={"ID":"28b97e59-606d-4810-bec5-cecbc1c691bc","Type":"ContainerStarted","Data":"387eee9d306f0fd00088598a66c1c7e341e649f3b21a8e6e37dde9ffdeb861ca"} Nov 25 10:05:38 crc kubenswrapper[4769]: I1125 10:05:38.828285 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-498c-account-create-4dtkm" podStartSLOduration=3.828248254 podStartE2EDuration="3.828248254s" podCreationTimestamp="2025-11-25 10:05:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:05:38.817921205 +0000 UTC m=+1287.402893538" watchObservedRunningTime="2025-11-25 10:05:38.828248254 +0000 UTC m=+1287.413220567" Nov 25 10:05:38 crc kubenswrapper[4769]: I1125 10:05:38.832083 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-dea0-account-create-8ns6l" event={"ID":"11fbb390-f40d-4098-adab-b13e53b51cc8","Type":"ContainerStarted","Data":"d334b21beaaff8a289ce74d803cb34309fdca5b99bee7152a261357c95dedd6c"} Nov 25 10:05:38 crc kubenswrapper[4769]: I1125 10:05:38.832130 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-dea0-account-create-8ns6l" event={"ID":"11fbb390-f40d-4098-adab-b13e53b51cc8","Type":"ContainerStarted","Data":"8f31f893b7c5b2e2076730a3bb2957a237c98909f75adb6ac049ca50d214dc15"} Nov 25 10:05:38 crc kubenswrapper[4769]: I1125 10:05:38.835337 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"f7987cb4-5485-438f-bc01-c69e509b81a6","Type":"ContainerStarted","Data":"83abc9b819f57de0d03c35e4c69b2aa51433a53a662813044a38e9580d8c6358"} Nov 25 10:05:38 crc kubenswrapper[4769]: I1125 10:05:38.837434 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-tnz6t-config-8qh9g" event={"ID":"5b7f14f6-2e75-4bf4-9c6c-3e3599047d02","Type":"ContainerStarted","Data":"0631db82193899290a4026e5fc5447980b28388b0ec40d2d9133f756bd245345"} Nov 25 10:05:38 crc kubenswrapper[4769]: I1125 10:05:38.839946 4769 generic.go:334] "Generic (PLEG): container finished" podID="8c0172ef-616f-4636-b51f-4e534a850822" containerID="0737cf1e9f3c9914d55a1c16965fb73261025d22adb852ebf9bc02abf8feae61" exitCode=0 Nov 25 10:05:38 crc kubenswrapper[4769]: I1125 
10:05:38.840086 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-q5w5g" event={"ID":"8c0172ef-616f-4636-b51f-4e534a850822","Type":"ContainerDied","Data":"0737cf1e9f3c9914d55a1c16965fb73261025d22adb852ebf9bc02abf8feae61"} Nov 25 10:05:38 crc kubenswrapper[4769]: I1125 10:05:38.841082 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-44vlp" event={"ID":"25f32f1a-8af2-4632-8a1e-689576d2d17b","Type":"ContainerStarted","Data":"7aa50de0e83672613acfcfeb8f49da9d90c5e843c68c36d6bab6c7562f0c97c1"} Nov 25 10:05:38 crc kubenswrapper[4769]: I1125 10:05:38.841954 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-1a9a-account-create-v2rj6" event={"ID":"27cb33b2-18c8-4d25-a441-4bdc64d8bfde","Type":"ContainerStarted","Data":"7820525cfd6c4a7b860ea8ba995c6970ab7110bd4753af0028ae4f047a32dacb"} Nov 25 10:05:38 crc kubenswrapper[4769]: I1125 10:05:38.842881 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-e28d-account-create-w5wr6" event={"ID":"d2550677-acee-408a-86c9-149a068d10a1","Type":"ContainerStarted","Data":"2901be68c28b8f5d76741723baa76bab6ecf2dfc29fa36f2fbd0c83292d5fbad"} Nov 25 10:05:38 crc kubenswrapper[4769]: I1125 10:05:38.846386 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-kd9nw" event={"ID":"192781af-b5a8-4804-b3a9-a53290328c8c","Type":"ContainerStarted","Data":"6cd2ed523228dd14223e9d41d61effb2954c846e1c8f67fc0edce305b18579d2"} Nov 25 10:05:38 crc kubenswrapper[4769]: I1125 10:05:38.846411 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-kd9nw" event={"ID":"192781af-b5a8-4804-b3a9-a53290328c8c","Type":"ContainerStarted","Data":"afa18bd37fd361d96f017ecd7a3af0fda8d51eb93ec4693ea70b6a5e02a23f9c"} Nov 25 10:05:38 crc kubenswrapper[4769]: I1125 10:05:38.849593 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-dea0-account-create-8ns6l" podStartSLOduration=3.849577749 podStartE2EDuration="3.849577749s" podCreationTimestamp="2025-11-25 10:05:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:05:38.847230298 +0000 UTC m=+1287.432202611" watchObservedRunningTime="2025-11-25 10:05:38.849577749 +0000 UTC m=+1287.434550052" Nov 25 10:05:38 crc kubenswrapper[4769]: I1125 10:05:38.881379 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-create-kd9nw" podStartSLOduration=3.881355825 podStartE2EDuration="3.881355825s" podCreationTimestamp="2025-11-25 10:05:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:05:38.879929568 +0000 UTC m=+1287.464901881" watchObservedRunningTime="2025-11-25 10:05:38.881355825 +0000 UTC m=+1287.466328138" Nov 25 10:05:39 crc kubenswrapper[4769]: I1125 10:05:39.422695 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-tnz6t" podUID="a94c59b5-e672-4d10-a090-89fa82afc1f3" containerName="ovn-controller" probeResult="failure" output=< Nov 25 10:05:39 crc kubenswrapper[4769]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 25 10:05:39 crc kubenswrapper[4769]: > Nov 25 10:05:39 crc kubenswrapper[4769]: E1125 10:05:39.839883 4769 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" 
err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda4223d39_07d7_421b_b3b7_953758e77444.slice/crio-conmon-2c1bd92ce912803b300403769ebf27817171f025600603407568e2704132a5cd.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd2550677_acee_408a_86c9_149a068d10a1.slice/crio-conmon-1d9188b083315ab023833cbc7f4522c645acd127b5322b4267af0310ee8e9240.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5b7f14f6_2e75_4bf4_9c6c_3e3599047d02.slice/crio-conmon-e0ebfa1cf62473ea4d5841cc8213e9832599f8c25f90fcd72a7c7ecd99c84878.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5b7f14f6_2e75_4bf4_9c6c_3e3599047d02.slice/crio-e0ebfa1cf62473ea4d5841cc8213e9832599f8c25f90fcd72a7c7ecd99c84878.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda4223d39_07d7_421b_b3b7_953758e77444.slice/crio-2c1bd92ce912803b300403769ebf27817171f025600603407568e2704132a5cd.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod27cb33b2_18c8_4d25_a441_4bdc64d8bfde.slice/crio-855770c5a65f55b54761d784a001d20212b2240c528cce318e0a2a72c055c7f1.scope\": RecentStats: unable to find data in memory cache]" Nov 25 10:05:39 crc kubenswrapper[4769]: I1125 10:05:39.856562 4769 generic.go:334] "Generic (PLEG): container finished" podID="25f32f1a-8af2-4632-8a1e-689576d2d17b" containerID="ec9b19989db202844e33f037416fd13d79f2e055b6bdd6f2e3bb8d99565e99a9" exitCode=0 Nov 25 10:05:39 crc kubenswrapper[4769]: I1125 10:05:39.856659 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-44vlp" event={"ID":"25f32f1a-8af2-4632-8a1e-689576d2d17b","Type":"ContainerDied","Data":"ec9b19989db202844e33f037416fd13d79f2e055b6bdd6f2e3bb8d99565e99a9"} Nov 25 10:05:39 crc kubenswrapper[4769]: I1125 10:05:39.860990 4769 generic.go:334] "Generic (PLEG): container finished" podID="11fbb390-f40d-4098-adab-b13e53b51cc8" containerID="d334b21beaaff8a289ce74d803cb34309fdca5b99bee7152a261357c95dedd6c" exitCode=0 Nov 25 10:05:39 crc kubenswrapper[4769]: I1125 10:05:39.861076 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-dea0-account-create-8ns6l" event={"ID":"11fbb390-f40d-4098-adab-b13e53b51cc8","Type":"ContainerDied","Data":"d334b21beaaff8a289ce74d803cb34309fdca5b99bee7152a261357c95dedd6c"} Nov 25 10:05:39 crc kubenswrapper[4769]: I1125 10:05:39.863845 4769 generic.go:334] "Generic (PLEG): container finished" podID="27cb33b2-18c8-4d25-a441-4bdc64d8bfde" containerID="855770c5a65f55b54761d784a001d20212b2240c528cce318e0a2a72c055c7f1" exitCode=0 Nov 25 10:05:39 crc kubenswrapper[4769]: I1125 10:05:39.863919 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-1a9a-account-create-v2rj6" event={"ID":"27cb33b2-18c8-4d25-a441-4bdc64d8bfde","Type":"ContainerDied","Data":"855770c5a65f55b54761d784a001d20212b2240c528cce318e0a2a72c055c7f1"} Nov 25 10:05:39 crc kubenswrapper[4769]: I1125 10:05:39.865912 4769 generic.go:334] "Generic (PLEG): container finished" podID="d2550677-acee-408a-86c9-149a068d10a1" containerID="1d9188b083315ab023833cbc7f4522c645acd127b5322b4267af0310ee8e9240" exitCode=0 Nov 25 10:05:39 crc kubenswrapper[4769]: I1125 10:05:39.866059 4769 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/heat-e28d-account-create-w5wr6" event={"ID":"d2550677-acee-408a-86c9-149a068d10a1","Type":"ContainerDied","Data":"1d9188b083315ab023833cbc7f4522c645acd127b5322b4267af0310ee8e9240"} Nov 25 10:05:39 crc kubenswrapper[4769]: I1125 10:05:39.869994 4769 generic.go:334] "Generic (PLEG): container finished" podID="5b7f14f6-2e75-4bf4-9c6c-3e3599047d02" containerID="e0ebfa1cf62473ea4d5841cc8213e9832599f8c25f90fcd72a7c7ecd99c84878" exitCode=0 Nov 25 10:05:39 crc kubenswrapper[4769]: I1125 10:05:39.870169 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-tnz6t-config-8qh9g" event={"ID":"5b7f14f6-2e75-4bf4-9c6c-3e3599047d02","Type":"ContainerDied","Data":"e0ebfa1cf62473ea4d5841cc8213e9832599f8c25f90fcd72a7c7ecd99c84878"} Nov 25 10:05:39 crc kubenswrapper[4769]: I1125 10:05:39.875323 4769 generic.go:334] "Generic (PLEG): container finished" podID="192781af-b5a8-4804-b3a9-a53290328c8c" containerID="6cd2ed523228dd14223e9d41d61effb2954c846e1c8f67fc0edce305b18579d2" exitCode=0 Nov 25 10:05:39 crc kubenswrapper[4769]: I1125 10:05:39.875384 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-kd9nw" event={"ID":"192781af-b5a8-4804-b3a9-a53290328c8c","Type":"ContainerDied","Data":"6cd2ed523228dd14223e9d41d61effb2954c846e1c8f67fc0edce305b18579d2"} Nov 25 10:05:39 crc kubenswrapper[4769]: I1125 10:05:39.877319 4769 generic.go:334] "Generic (PLEG): container finished" podID="c5155b12-87cf-4695-a19c-d54322926320" containerID="7c481239b8be7e86fff05999c53aecfeb57843ac547d565d8963b89952d03fe7" exitCode=0 Nov 25 10:05:39 crc kubenswrapper[4769]: I1125 10:05:39.877383 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-498c-account-create-4dtkm" event={"ID":"c5155b12-87cf-4695-a19c-d54322926320","Type":"ContainerDied","Data":"7c481239b8be7e86fff05999c53aecfeb57843ac547d565d8963b89952d03fe7"} Nov 25 10:05:39 crc kubenswrapper[4769]: I1125 10:05:39.881837 4769 generic.go:334] "Generic (PLEG): container finished" podID="a4223d39-07d7-421b-b3b7-953758e77444" containerID="2c1bd92ce912803b300403769ebf27817171f025600603407568e2704132a5cd" exitCode=0 Nov 25 10:05:39 crc kubenswrapper[4769]: I1125 10:05:39.882219 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-8r5jb" event={"ID":"a4223d39-07d7-421b-b3b7-953758e77444","Type":"ContainerDied","Data":"2c1bd92ce912803b300403769ebf27817171f025600603407568e2704132a5cd"} Nov 25 10:05:40 crc kubenswrapper[4769]: I1125 10:05:40.417299 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-q5w5g" Nov 25 10:05:40 crc kubenswrapper[4769]: I1125 10:05:40.471577 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bn5l6\" (UniqueName: \"kubernetes.io/projected/8c0172ef-616f-4636-b51f-4e534a850822-kube-api-access-bn5l6\") pod \"8c0172ef-616f-4636-b51f-4e534a850822\" (UID: \"8c0172ef-616f-4636-b51f-4e534a850822\") " Nov 25 10:05:40 crc kubenswrapper[4769]: I1125 10:05:40.472326 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8c0172ef-616f-4636-b51f-4e534a850822-operator-scripts\") pod \"8c0172ef-616f-4636-b51f-4e534a850822\" (UID: \"8c0172ef-616f-4636-b51f-4e534a850822\") " Nov 25 10:05:40 crc kubenswrapper[4769]: I1125 10:05:40.473124 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8c0172ef-616f-4636-b51f-4e534a850822-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8c0172ef-616f-4636-b51f-4e534a850822" (UID: "8c0172ef-616f-4636-b51f-4e534a850822"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:05:40 crc kubenswrapper[4769]: I1125 10:05:40.480174 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8c0172ef-616f-4636-b51f-4e534a850822-kube-api-access-bn5l6" (OuterVolumeSpecName: "kube-api-access-bn5l6") pod "8c0172ef-616f-4636-b51f-4e534a850822" (UID: "8c0172ef-616f-4636-b51f-4e534a850822"). InnerVolumeSpecName "kube-api-access-bn5l6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:05:40 crc kubenswrapper[4769]: I1125 10:05:40.575686 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bn5l6\" (UniqueName: \"kubernetes.io/projected/8c0172ef-616f-4636-b51f-4e534a850822-kube-api-access-bn5l6\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:40 crc kubenswrapper[4769]: I1125 10:05:40.575774 4769 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8c0172ef-616f-4636-b51f-4e534a850822-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:40 crc kubenswrapper[4769]: I1125 10:05:40.897591 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-q5w5g" event={"ID":"8c0172ef-616f-4636-b51f-4e534a850822","Type":"ContainerDied","Data":"724dff3a93e1aa060c526777a2ae4e484478fb958089738a3fcaaf11a16d430a"} Nov 25 10:05:40 crc kubenswrapper[4769]: I1125 10:05:40.897643 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="724dff3a93e1aa060c526777a2ae4e484478fb958089738a3fcaaf11a16d430a" Nov 25 10:05:40 crc kubenswrapper[4769]: I1125 10:05:40.897655 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-q5w5g" Nov 25 10:05:40 crc kubenswrapper[4769]: I1125 10:05:40.901722 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"f7987cb4-5485-438f-bc01-c69e509b81a6","Type":"ContainerStarted","Data":"0857fbc37d37e8951744079d6d9959ea639e551ac1e1d3ac31cbd9d2abfb96db"} Nov 25 10:05:41 crc kubenswrapper[4769]: I1125 10:05:41.267826 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-498c-account-create-4dtkm" Nov 25 10:05:41 crc kubenswrapper[4769]: I1125 10:05:41.281616 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-0"] Nov 25 10:05:41 crc kubenswrapper[4769]: E1125 10:05:41.293605 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ff8162a-1d7e-4415-b231-5d09857454cb" containerName="mariadb-database-create" Nov 25 10:05:41 crc kubenswrapper[4769]: I1125 10:05:41.293657 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ff8162a-1d7e-4415-b231-5d09857454cb" containerName="mariadb-database-create" Nov 25 10:05:41 crc kubenswrapper[4769]: E1125 10:05:41.293702 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c0172ef-616f-4636-b51f-4e534a850822" containerName="mariadb-database-create" Nov 25 10:05:41 crc kubenswrapper[4769]: I1125 10:05:41.293711 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c0172ef-616f-4636-b51f-4e534a850822" containerName="mariadb-database-create" Nov 25 10:05:41 crc kubenswrapper[4769]: E1125 10:05:41.293741 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3bc94902-4625-4198-a28e-a52f51888eea" containerName="mariadb-account-create" Nov 25 10:05:41 crc kubenswrapper[4769]: I1125 10:05:41.293750 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="3bc94902-4625-4198-a28e-a52f51888eea" containerName="mariadb-account-create" Nov 25 10:05:41 crc kubenswrapper[4769]: E1125 10:05:41.293765 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5155b12-87cf-4695-a19c-d54322926320" containerName="mariadb-account-create" Nov 25 10:05:41 crc kubenswrapper[4769]: I1125 10:05:41.293774 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5155b12-87cf-4695-a19c-d54322926320" containerName="mariadb-account-create" Nov 25 10:05:41 crc kubenswrapper[4769]: I1125 10:05:41.294180 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="8c0172ef-616f-4636-b51f-4e534a850822" containerName="mariadb-database-create" Nov 25 10:05:41 crc kubenswrapper[4769]: I1125 10:05:41.294205 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ff8162a-1d7e-4415-b231-5d09857454cb" containerName="mariadb-database-create" Nov 25 10:05:41 crc kubenswrapper[4769]: I1125 10:05:41.294225 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="3bc94902-4625-4198-a28e-a52f51888eea" containerName="mariadb-account-create" Nov 25 10:05:41 crc kubenswrapper[4769]: I1125 10:05:41.294243 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5155b12-87cf-4695-a19c-d54322926320" containerName="mariadb-account-create" Nov 25 10:05:41 crc kubenswrapper[4769]: I1125 10:05:41.295259 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-0" Nov 25 10:05:41 crc kubenswrapper[4769]: I1125 10:05:41.295268 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c5155b12-87cf-4695-a19c-d54322926320-operator-scripts\") pod \"c5155b12-87cf-4695-a19c-d54322926320\" (UID: \"c5155b12-87cf-4695-a19c-d54322926320\") " Nov 25 10:05:41 crc kubenswrapper[4769]: I1125 10:05:41.295307 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-njmcg\" (UniqueName: \"kubernetes.io/projected/c5155b12-87cf-4695-a19c-d54322926320-kube-api-access-njmcg\") pod \"c5155b12-87cf-4695-a19c-d54322926320\" (UID: \"c5155b12-87cf-4695-a19c-d54322926320\") " Nov 25 10:05:41 crc kubenswrapper[4769]: I1125 10:05:41.298347 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c5155b12-87cf-4695-a19c-d54322926320-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c5155b12-87cf-4695-a19c-d54322926320" (UID: "c5155b12-87cf-4695-a19c-d54322926320"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:05:41 crc kubenswrapper[4769]: I1125 10:05:41.300508 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-config-data" Nov 25 10:05:41 crc kubenswrapper[4769]: I1125 10:05:41.301081 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"] Nov 25 10:05:41 crc kubenswrapper[4769]: I1125 10:05:41.311861 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c5155b12-87cf-4695-a19c-d54322926320-kube-api-access-njmcg" (OuterVolumeSpecName: "kube-api-access-njmcg") pod "c5155b12-87cf-4695-a19c-d54322926320" (UID: "c5155b12-87cf-4695-a19c-d54322926320"). InnerVolumeSpecName "kube-api-access-njmcg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:05:41 crc kubenswrapper[4769]: I1125 10:05:41.398706 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cbad0ab-1254-496a-8d0c-88fc840cf17e-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"6cbad0ab-1254-496a-8d0c-88fc840cf17e\") " pod="openstack/mysqld-exporter-0" Nov 25 10:05:41 crc kubenswrapper[4769]: I1125 10:05:41.399262 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p8bxf\" (UniqueName: \"kubernetes.io/projected/6cbad0ab-1254-496a-8d0c-88fc840cf17e-kube-api-access-p8bxf\") pod \"mysqld-exporter-0\" (UID: \"6cbad0ab-1254-496a-8d0c-88fc840cf17e\") " pod="openstack/mysqld-exporter-0" Nov 25 10:05:41 crc kubenswrapper[4769]: I1125 10:05:41.399385 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6cbad0ab-1254-496a-8d0c-88fc840cf17e-config-data\") pod \"mysqld-exporter-0\" (UID: \"6cbad0ab-1254-496a-8d0c-88fc840cf17e\") " pod="openstack/mysqld-exporter-0" Nov 25 10:05:41 crc kubenswrapper[4769]: I1125 10:05:41.399566 4769 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c5155b12-87cf-4695-a19c-d54322926320-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:41 crc kubenswrapper[4769]: I1125 10:05:41.399591 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-njmcg\" (UniqueName: \"kubernetes.io/projected/c5155b12-87cf-4695-a19c-d54322926320-kube-api-access-njmcg\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:41 crc kubenswrapper[4769]: I1125 10:05:41.501700 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cbad0ab-1254-496a-8d0c-88fc840cf17e-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"6cbad0ab-1254-496a-8d0c-88fc840cf17e\") " pod="openstack/mysqld-exporter-0" Nov 25 10:05:41 crc kubenswrapper[4769]: I1125 10:05:41.501789 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p8bxf\" (UniqueName: \"kubernetes.io/projected/6cbad0ab-1254-496a-8d0c-88fc840cf17e-kube-api-access-p8bxf\") pod \"mysqld-exporter-0\" (UID: \"6cbad0ab-1254-496a-8d0c-88fc840cf17e\") " pod="openstack/mysqld-exporter-0" Nov 25 10:05:41 crc kubenswrapper[4769]: I1125 10:05:41.501862 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6cbad0ab-1254-496a-8d0c-88fc840cf17e-config-data\") pod \"mysqld-exporter-0\" (UID: \"6cbad0ab-1254-496a-8d0c-88fc840cf17e\") " pod="openstack/mysqld-exporter-0" Nov 25 10:05:41 crc kubenswrapper[4769]: I1125 10:05:41.509215 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cbad0ab-1254-496a-8d0c-88fc840cf17e-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"6cbad0ab-1254-496a-8d0c-88fc840cf17e\") " pod="openstack/mysqld-exporter-0" Nov 25 10:05:41 crc kubenswrapper[4769]: I1125 10:05:41.511729 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6cbad0ab-1254-496a-8d0c-88fc840cf17e-config-data\") pod \"mysqld-exporter-0\" (UID: 
\"6cbad0ab-1254-496a-8d0c-88fc840cf17e\") " pod="openstack/mysqld-exporter-0" Nov 25 10:05:41 crc kubenswrapper[4769]: I1125 10:05:41.534049 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p8bxf\" (UniqueName: \"kubernetes.io/projected/6cbad0ab-1254-496a-8d0c-88fc840cf17e-kube-api-access-p8bxf\") pod \"mysqld-exporter-0\" (UID: \"6cbad0ab-1254-496a-8d0c-88fc840cf17e\") " pod="openstack/mysqld-exporter-0" Nov 25 10:05:41 crc kubenswrapper[4769]: I1125 10:05:41.757277 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0" Nov 25 10:05:41 crc kubenswrapper[4769]: I1125 10:05:41.918149 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-498c-account-create-4dtkm" event={"ID":"c5155b12-87cf-4695-a19c-d54322926320","Type":"ContainerDied","Data":"67a943a283eb4fea587ac4d39731052127e14cd83f9336db694ef8b83042a834"} Nov 25 10:05:41 crc kubenswrapper[4769]: I1125 10:05:41.919356 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="67a943a283eb4fea587ac4d39731052127e14cd83f9336db694ef8b83042a834" Nov 25 10:05:41 crc kubenswrapper[4769]: I1125 10:05:41.919504 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-498c-account-create-4dtkm" Nov 25 10:05:41 crc kubenswrapper[4769]: I1125 10:05:41.931759 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"f7987cb4-5485-438f-bc01-c69e509b81a6","Type":"ContainerStarted","Data":"9b7f1fec188bf24286d84ccea93ac229566b9a9674b6ae565ea70918d76f44f2"} Nov 25 10:05:41 crc kubenswrapper[4769]: I1125 10:05:41.931809 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"f7987cb4-5485-438f-bc01-c69e509b81a6","Type":"ContainerStarted","Data":"3d3d46ed6370631752066e0754a82283c7fbcd870ee44cbd17ecfc6da4febdee"} Nov 25 10:05:42 crc kubenswrapper[4769]: I1125 10:05:42.217110 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Nov 25 10:05:44 crc kubenswrapper[4769]: I1125 10:05:44.272147 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-tnz6t" Nov 25 10:05:44 crc kubenswrapper[4769]: I1125 10:05:44.981127 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-8r5jb" event={"ID":"a4223d39-07d7-421b-b3b7-953758e77444","Type":"ContainerDied","Data":"b2e86c6d6eb8a09cd7f236729edd324a0ad8eaf534f3d1b393e42ee43d16556d"} Nov 25 10:05:44 crc kubenswrapper[4769]: I1125 10:05:44.981524 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b2e86c6d6eb8a09cd7f236729edd324a0ad8eaf534f3d1b393e42ee43d16556d" Nov 25 10:05:44 crc kubenswrapper[4769]: I1125 10:05:44.983611 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-44vlp" event={"ID":"25f32f1a-8af2-4632-8a1e-689576d2d17b","Type":"ContainerDied","Data":"7aa50de0e83672613acfcfeb8f49da9d90c5e843c68c36d6bab6c7562f0c97c1"} Nov 25 10:05:44 crc kubenswrapper[4769]: I1125 10:05:44.983678 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7aa50de0e83672613acfcfeb8f49da9d90c5e843c68c36d6bab6c7562f0c97c1" Nov 25 10:05:44 crc kubenswrapper[4769]: I1125 10:05:44.986161 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-dea0-account-create-8ns6l" 
event={"ID":"11fbb390-f40d-4098-adab-b13e53b51cc8","Type":"ContainerDied","Data":"8f31f893b7c5b2e2076730a3bb2957a237c98909f75adb6ac049ca50d214dc15"} Nov 25 10:05:44 crc kubenswrapper[4769]: I1125 10:05:44.986197 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8f31f893b7c5b2e2076730a3bb2957a237c98909f75adb6ac049ca50d214dc15" Nov 25 10:05:44 crc kubenswrapper[4769]: I1125 10:05:44.989523 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-1a9a-account-create-v2rj6" event={"ID":"27cb33b2-18c8-4d25-a441-4bdc64d8bfde","Type":"ContainerDied","Data":"7820525cfd6c4a7b860ea8ba995c6970ab7110bd4753af0028ae4f047a32dacb"} Nov 25 10:05:44 crc kubenswrapper[4769]: I1125 10:05:44.989569 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7820525cfd6c4a7b860ea8ba995c6970ab7110bd4753af0028ae4f047a32dacb" Nov 25 10:05:44 crc kubenswrapper[4769]: I1125 10:05:44.991359 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-e28d-account-create-w5wr6" event={"ID":"d2550677-acee-408a-86c9-149a068d10a1","Type":"ContainerDied","Data":"2901be68c28b8f5d76741723baa76bab6ecf2dfc29fa36f2fbd0c83292d5fbad"} Nov 25 10:05:44 crc kubenswrapper[4769]: I1125 10:05:44.991413 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2901be68c28b8f5d76741723baa76bab6ecf2dfc29fa36f2fbd0c83292d5fbad" Nov 25 10:05:44 crc kubenswrapper[4769]: I1125 10:05:44.993479 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-tnz6t-config-8qh9g" event={"ID":"5b7f14f6-2e75-4bf4-9c6c-3e3599047d02","Type":"ContainerDied","Data":"0631db82193899290a4026e5fc5447980b28388b0ec40d2d9133f756bd245345"} Nov 25 10:05:44 crc kubenswrapper[4769]: I1125 10:05:44.993520 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0631db82193899290a4026e5fc5447980b28388b0ec40d2d9133f756bd245345" Nov 25 10:05:44 crc kubenswrapper[4769]: I1125 10:05:44.994986 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-kd9nw" event={"ID":"192781af-b5a8-4804-b3a9-a53290328c8c","Type":"ContainerDied","Data":"afa18bd37fd361d96f017ecd7a3af0fda8d51eb93ec4693ea70b6a5e02a23f9c"} Nov 25 10:05:44 crc kubenswrapper[4769]: I1125 10:05:44.995014 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="afa18bd37fd361d96f017ecd7a3af0fda8d51eb93ec4693ea70b6a5e02a23f9c" Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.184323 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-1a9a-account-create-v2rj6" Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.198907 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-kd9nw" Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.218005 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-8r5jb" Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.237929 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-44vlp" Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.246135 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-tnz6t-config-8qh9g" Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.266404 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-dea0-account-create-8ns6l" Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.267378 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/192781af-b5a8-4804-b3a9-a53290328c8c-operator-scripts\") pod \"192781af-b5a8-4804-b3a9-a53290328c8c\" (UID: \"192781af-b5a8-4804-b3a9-a53290328c8c\") " Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.267479 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27cb33b2-18c8-4d25-a441-4bdc64d8bfde-operator-scripts\") pod \"27cb33b2-18c8-4d25-a441-4bdc64d8bfde\" (UID: \"27cb33b2-18c8-4d25-a441-4bdc64d8bfde\") " Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.267510 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xsr84\" (UniqueName: \"kubernetes.io/projected/192781af-b5a8-4804-b3a9-a53290328c8c-kube-api-access-xsr84\") pod \"192781af-b5a8-4804-b3a9-a53290328c8c\" (UID: \"192781af-b5a8-4804-b3a9-a53290328c8c\") " Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.267666 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jqphd\" (UniqueName: \"kubernetes.io/projected/27cb33b2-18c8-4d25-a441-4bdc64d8bfde-kube-api-access-jqphd\") pod \"27cb33b2-18c8-4d25-a441-4bdc64d8bfde\" (UID: \"27cb33b2-18c8-4d25-a441-4bdc64d8bfde\") " Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.268934 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/27cb33b2-18c8-4d25-a441-4bdc64d8bfde-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "27cb33b2-18c8-4d25-a441-4bdc64d8bfde" (UID: "27cb33b2-18c8-4d25-a441-4bdc64d8bfde"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.269921 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/192781af-b5a8-4804-b3a9-a53290328c8c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "192781af-b5a8-4804-b3a9-a53290328c8c" (UID: "192781af-b5a8-4804-b3a9-a53290328c8c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.271200 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-e28d-account-create-w5wr6" Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.277098 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/192781af-b5a8-4804-b3a9-a53290328c8c-kube-api-access-xsr84" (OuterVolumeSpecName: "kube-api-access-xsr84") pod "192781af-b5a8-4804-b3a9-a53290328c8c" (UID: "192781af-b5a8-4804-b3a9-a53290328c8c"). InnerVolumeSpecName "kube-api-access-xsr84". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.278801 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/27cb33b2-18c8-4d25-a441-4bdc64d8bfde-kube-api-access-jqphd" (OuterVolumeSpecName: "kube-api-access-jqphd") pod "27cb33b2-18c8-4d25-a441-4bdc64d8bfde" (UID: "27cb33b2-18c8-4d25-a441-4bdc64d8bfde"). InnerVolumeSpecName "kube-api-access-jqphd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.370112 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d2550677-acee-408a-86c9-149a068d10a1-operator-scripts\") pod \"d2550677-acee-408a-86c9-149a068d10a1\" (UID: \"d2550677-acee-408a-86c9-149a068d10a1\") " Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.370210 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n28ft\" (UniqueName: \"kubernetes.io/projected/11fbb390-f40d-4098-adab-b13e53b51cc8-kube-api-access-n28ft\") pod \"11fbb390-f40d-4098-adab-b13e53b51cc8\" (UID: \"11fbb390-f40d-4098-adab-b13e53b51cc8\") " Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.370266 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f4hpd\" (UniqueName: \"kubernetes.io/projected/a4223d39-07d7-421b-b3b7-953758e77444-kube-api-access-f4hpd\") pod \"a4223d39-07d7-421b-b3b7-953758e77444\" (UID: \"a4223d39-07d7-421b-b3b7-953758e77444\") " Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.370386 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5b7f14f6-2e75-4bf4-9c6c-3e3599047d02-scripts\") pod \"5b7f14f6-2e75-4bf4-9c6c-3e3599047d02\" (UID: \"5b7f14f6-2e75-4bf4-9c6c-3e3599047d02\") " Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.370419 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5b7f14f6-2e75-4bf4-9c6c-3e3599047d02-var-log-ovn\") pod \"5b7f14f6-2e75-4bf4-9c6c-3e3599047d02\" (UID: \"5b7f14f6-2e75-4bf4-9c6c-3e3599047d02\") " Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.370795 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nclb9\" (UniqueName: \"kubernetes.io/projected/d2550677-acee-408a-86c9-149a068d10a1-kube-api-access-nclb9\") pod \"d2550677-acee-408a-86c9-149a068d10a1\" (UID: \"d2550677-acee-408a-86c9-149a068d10a1\") " Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.370874 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a4223d39-07d7-421b-b3b7-953758e77444-operator-scripts\") pod \"a4223d39-07d7-421b-b3b7-953758e77444\" (UID: \"a4223d39-07d7-421b-b3b7-953758e77444\") " Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.371558 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/5b7f14f6-2e75-4bf4-9c6c-3e3599047d02-additional-scripts\") pod \"5b7f14f6-2e75-4bf4-9c6c-3e3599047d02\" (UID: \"5b7f14f6-2e75-4bf4-9c6c-3e3599047d02\") " Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.371617 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kn2wz\" (UniqueName: \"kubernetes.io/projected/5b7f14f6-2e75-4bf4-9c6c-3e3599047d02-kube-api-access-kn2wz\") pod \"5b7f14f6-2e75-4bf4-9c6c-3e3599047d02\" (UID: \"5b7f14f6-2e75-4bf4-9c6c-3e3599047d02\") " Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.371674 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5b7f14f6-2e75-4bf4-9c6c-3e3599047d02-var-run\") pod 
\"5b7f14f6-2e75-4bf4-9c6c-3e3599047d02\" (UID: \"5b7f14f6-2e75-4bf4-9c6c-3e3599047d02\") " Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.371788 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z8s4t\" (UniqueName: \"kubernetes.io/projected/25f32f1a-8af2-4632-8a1e-689576d2d17b-kube-api-access-z8s4t\") pod \"25f32f1a-8af2-4632-8a1e-689576d2d17b\" (UID: \"25f32f1a-8af2-4632-8a1e-689576d2d17b\") " Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.371917 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5b7f14f6-2e75-4bf4-9c6c-3e3599047d02-var-run-ovn\") pod \"5b7f14f6-2e75-4bf4-9c6c-3e3599047d02\" (UID: \"5b7f14f6-2e75-4bf4-9c6c-3e3599047d02\") " Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.371955 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/25f32f1a-8af2-4632-8a1e-689576d2d17b-operator-scripts\") pod \"25f32f1a-8af2-4632-8a1e-689576d2d17b\" (UID: \"25f32f1a-8af2-4632-8a1e-689576d2d17b\") " Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.372004 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11fbb390-f40d-4098-adab-b13e53b51cc8-operator-scripts\") pod \"11fbb390-f40d-4098-adab-b13e53b51cc8\" (UID: \"11fbb390-f40d-4098-adab-b13e53b51cc8\") " Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.372905 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5b7f14f6-2e75-4bf4-9c6c-3e3599047d02-scripts" (OuterVolumeSpecName: "scripts") pod "5b7f14f6-2e75-4bf4-9c6c-3e3599047d02" (UID: "5b7f14f6-2e75-4bf4-9c6c-3e3599047d02"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.372946 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jqphd\" (UniqueName: \"kubernetes.io/projected/27cb33b2-18c8-4d25-a441-4bdc64d8bfde-kube-api-access-jqphd\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.372988 4769 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/192781af-b5a8-4804-b3a9-a53290328c8c-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.373002 4769 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27cb33b2-18c8-4d25-a441-4bdc64d8bfde-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.373014 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xsr84\" (UniqueName: \"kubernetes.io/projected/192781af-b5a8-4804-b3a9-a53290328c8c-kube-api-access-xsr84\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.373944 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/11fbb390-f40d-4098-adab-b13e53b51cc8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "11fbb390-f40d-4098-adab-b13e53b51cc8" (UID: "11fbb390-f40d-4098-adab-b13e53b51cc8"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.374043 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5b7f14f6-2e75-4bf4-9c6c-3e3599047d02-var-run" (OuterVolumeSpecName: "var-run") pod "5b7f14f6-2e75-4bf4-9c6c-3e3599047d02" (UID: "5b7f14f6-2e75-4bf4-9c6c-3e3599047d02"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.375270 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/11fbb390-f40d-4098-adab-b13e53b51cc8-kube-api-access-n28ft" (OuterVolumeSpecName: "kube-api-access-n28ft") pod "11fbb390-f40d-4098-adab-b13e53b51cc8" (UID: "11fbb390-f40d-4098-adab-b13e53b51cc8"). InnerVolumeSpecName "kube-api-access-n28ft". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.375280 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a4223d39-07d7-421b-b3b7-953758e77444-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a4223d39-07d7-421b-b3b7-953758e77444" (UID: "a4223d39-07d7-421b-b3b7-953758e77444"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.375336 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5b7f14f6-2e75-4bf4-9c6c-3e3599047d02-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "5b7f14f6-2e75-4bf4-9c6c-3e3599047d02" (UID: "5b7f14f6-2e75-4bf4-9c6c-3e3599047d02"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.375337 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5b7f14f6-2e75-4bf4-9c6c-3e3599047d02-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "5b7f14f6-2e75-4bf4-9c6c-3e3599047d02" (UID: "5b7f14f6-2e75-4bf4-9c6c-3e3599047d02"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.376539 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5b7f14f6-2e75-4bf4-9c6c-3e3599047d02-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "5b7f14f6-2e75-4bf4-9c6c-3e3599047d02" (UID: "5b7f14f6-2e75-4bf4-9c6c-3e3599047d02"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.376555 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25f32f1a-8af2-4632-8a1e-689576d2d17b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "25f32f1a-8af2-4632-8a1e-689576d2d17b" (UID: "25f32f1a-8af2-4632-8a1e-689576d2d17b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.377246 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b7f14f6-2e75-4bf4-9c6c-3e3599047d02-kube-api-access-kn2wz" (OuterVolumeSpecName: "kube-api-access-kn2wz") pod "5b7f14f6-2e75-4bf4-9c6c-3e3599047d02" (UID: "5b7f14f6-2e75-4bf4-9c6c-3e3599047d02"). 
InnerVolumeSpecName "kube-api-access-kn2wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.377865 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d2550677-acee-408a-86c9-149a068d10a1-kube-api-access-nclb9" (OuterVolumeSpecName: "kube-api-access-nclb9") pod "d2550677-acee-408a-86c9-149a068d10a1" (UID: "d2550677-acee-408a-86c9-149a068d10a1"). InnerVolumeSpecName "kube-api-access-nclb9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.377992 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d2550677-acee-408a-86c9-149a068d10a1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d2550677-acee-408a-86c9-149a068d10a1" (UID: "d2550677-acee-408a-86c9-149a068d10a1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.379198 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a4223d39-07d7-421b-b3b7-953758e77444-kube-api-access-f4hpd" (OuterVolumeSpecName: "kube-api-access-f4hpd") pod "a4223d39-07d7-421b-b3b7-953758e77444" (UID: "a4223d39-07d7-421b-b3b7-953758e77444"). InnerVolumeSpecName "kube-api-access-f4hpd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.379257 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25f32f1a-8af2-4632-8a1e-689576d2d17b-kube-api-access-z8s4t" (OuterVolumeSpecName: "kube-api-access-z8s4t") pod "25f32f1a-8af2-4632-8a1e-689576d2d17b" (UID: "25f32f1a-8af2-4632-8a1e-689576d2d17b"). InnerVolumeSpecName "kube-api-access-z8s4t". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.448806 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"] Nov 25 10:05:45 crc kubenswrapper[4769]: W1125 10:05:45.455370 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6cbad0ab_1254_496a_8d0c_88fc840cf17e.slice/crio-13f1ff57f73652a98cbeb633de1fb3c21add5b57253754deb465bde42627b857 WatchSource:0}: Error finding container 13f1ff57f73652a98cbeb633de1fb3c21add5b57253754deb465bde42627b857: Status 404 returned error can't find the container with id 13f1ff57f73652a98cbeb633de1fb3c21add5b57253754deb465bde42627b857 Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.481810 4769 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d2550677-acee-408a-86c9-149a068d10a1-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.481891 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n28ft\" (UniqueName: \"kubernetes.io/projected/11fbb390-f40d-4098-adab-b13e53b51cc8-kube-api-access-n28ft\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.481906 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f4hpd\" (UniqueName: \"kubernetes.io/projected/a4223d39-07d7-421b-b3b7-953758e77444-kube-api-access-f4hpd\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.481920 4769 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5b7f14f6-2e75-4bf4-9c6c-3e3599047d02-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.481929 4769 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5b7f14f6-2e75-4bf4-9c6c-3e3599047d02-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.481937 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nclb9\" (UniqueName: \"kubernetes.io/projected/d2550677-acee-408a-86c9-149a068d10a1-kube-api-access-nclb9\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.481946 4769 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a4223d39-07d7-421b-b3b7-953758e77444-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.481995 4769 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/5b7f14f6-2e75-4bf4-9c6c-3e3599047d02-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.482006 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kn2wz\" (UniqueName: \"kubernetes.io/projected/5b7f14f6-2e75-4bf4-9c6c-3e3599047d02-kube-api-access-kn2wz\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.482018 4769 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5b7f14f6-2e75-4bf4-9c6c-3e3599047d02-var-run\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.482027 4769 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z8s4t\" (UniqueName: \"kubernetes.io/projected/25f32f1a-8af2-4632-8a1e-689576d2d17b-kube-api-access-z8s4t\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.482036 4769 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5b7f14f6-2e75-4bf4-9c6c-3e3599047d02-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.482044 4769 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/25f32f1a-8af2-4632-8a1e-689576d2d17b-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:45 crc kubenswrapper[4769]: I1125 10:05:45.482071 4769 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11fbb390-f40d-4098-adab-b13e53b51cc8-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.008940 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"f7987cb4-5485-438f-bc01-c69e509b81a6","Type":"ContainerStarted","Data":"1686b0990f90958cda9bc520a052f9cb68c66e05e13df9addabc2f4157d15cc2"} Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.011550 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-kt2fl" event={"ID":"e5719295-a2f1-4d2f-a021-1da5411acbc8","Type":"ContainerStarted","Data":"096dec0bff515f769bec47c9874a06e3620cb9075a0534fbd7b6e776e814dcac"} Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.013240 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"6cbad0ab-1254-496a-8d0c-88fc840cf17e","Type":"ContainerStarted","Data":"13f1ff57f73652a98cbeb633de1fb3c21add5b57253754deb465bde42627b857"} Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.013290 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-44vlp" Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.013294 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-dea0-account-create-8ns6l" Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.013326 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-tnz6t-config-8qh9g" Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.013329 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-e28d-account-create-w5wr6" Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.013334 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-8r5jb" Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.013334 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-1a9a-account-create-v2rj6" Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.014081 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-create-kd9nw" Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.044945 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-kt2fl" podStartSLOduration=4.710144539 podStartE2EDuration="11.044920877s" podCreationTimestamp="2025-11-25 10:05:35 +0000 UTC" firstStartedPulling="2025-11-25 10:05:38.66322881 +0000 UTC m=+1287.248201123" lastFinishedPulling="2025-11-25 10:05:44.998005148 +0000 UTC m=+1293.582977461" observedRunningTime="2025-11-25 10:05:46.033233072 +0000 UTC m=+1294.618205405" watchObservedRunningTime="2025-11-25 10:05:46.044920877 +0000 UTC m=+1294.629893190" Nov 25 10:05:46 crc kubenswrapper[4769]: E1125 10:05:46.179407 4769 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5b7f14f6_2e75_4bf4_9c6c_3e3599047d02.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod11fbb390_f40d_4098_adab_b13e53b51cc8.slice/crio-8f31f893b7c5b2e2076730a3bb2957a237c98909f75adb6ac049ca50d214dc15\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod27cb33b2_18c8_4d25_a441_4bdc64d8bfde.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod27cb33b2_18c8_4d25_a441_4bdc64d8bfde.slice/crio-7820525cfd6c4a7b860ea8ba995c6970ab7110bd4753af0028ae4f047a32dacb\": RecentStats: unable to find data in memory cache]" Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.380526 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-tnz6t-config-8qh9g"] Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.439449 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-tnz6t-config-8qh9g"] Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.483610 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-tnz6t-config-tk4p5"] Nov 25 10:05:46 crc kubenswrapper[4769]: E1125 10:05:46.484308 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25f32f1a-8af2-4632-8a1e-689576d2d17b" containerName="mariadb-database-create" Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.484336 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="25f32f1a-8af2-4632-8a1e-689576d2d17b" containerName="mariadb-database-create" Nov 25 10:05:46 crc kubenswrapper[4769]: E1125 10:05:46.485889 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4223d39-07d7-421b-b3b7-953758e77444" containerName="mariadb-database-create" Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.485900 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4223d39-07d7-421b-b3b7-953758e77444" containerName="mariadb-database-create" Nov 25 10:05:46 crc kubenswrapper[4769]: E1125 10:05:46.485921 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11fbb390-f40d-4098-adab-b13e53b51cc8" containerName="mariadb-account-create" Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.485931 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="11fbb390-f40d-4098-adab-b13e53b51cc8" containerName="mariadb-account-create" Nov 25 10:05:46 crc kubenswrapper[4769]: E1125 10:05:46.485944 4769 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="d2550677-acee-408a-86c9-149a068d10a1" containerName="mariadb-account-create" Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.485954 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2550677-acee-408a-86c9-149a068d10a1" containerName="mariadb-account-create" Nov 25 10:05:46 crc kubenswrapper[4769]: E1125 10:05:46.485988 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="192781af-b5a8-4804-b3a9-a53290328c8c" containerName="mariadb-database-create" Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.485996 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="192781af-b5a8-4804-b3a9-a53290328c8c" containerName="mariadb-database-create" Nov 25 10:05:46 crc kubenswrapper[4769]: E1125 10:05:46.486020 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b7f14f6-2e75-4bf4-9c6c-3e3599047d02" containerName="ovn-config" Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.486028 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b7f14f6-2e75-4bf4-9c6c-3e3599047d02" containerName="ovn-config" Nov 25 10:05:46 crc kubenswrapper[4769]: E1125 10:05:46.486057 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27cb33b2-18c8-4d25-a441-4bdc64d8bfde" containerName="mariadb-account-create" Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.486065 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="27cb33b2-18c8-4d25-a441-4bdc64d8bfde" containerName="mariadb-account-create" Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.486412 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4223d39-07d7-421b-b3b7-953758e77444" containerName="mariadb-database-create" Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.486438 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="d2550677-acee-408a-86c9-149a068d10a1" containerName="mariadb-account-create" Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.486448 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="27cb33b2-18c8-4d25-a441-4bdc64d8bfde" containerName="mariadb-account-create" Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.486457 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="25f32f1a-8af2-4632-8a1e-689576d2d17b" containerName="mariadb-database-create" Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.486468 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="11fbb390-f40d-4098-adab-b13e53b51cc8" containerName="mariadb-account-create" Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.486486 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b7f14f6-2e75-4bf4-9c6c-3e3599047d02" containerName="ovn-config" Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.486497 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="192781af-b5a8-4804-b3a9-a53290328c8c" containerName="mariadb-database-create" Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.487464 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-tnz6t-config-tk4p5" Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.493438 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.493656 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-tnz6t-config-tk4p5"] Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.618801 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/4e7c7e4a-8aad-4693-81de-0029a90c48ac-additional-scripts\") pod \"ovn-controller-tnz6t-config-tk4p5\" (UID: \"4e7c7e4a-8aad-4693-81de-0029a90c48ac\") " pod="openstack/ovn-controller-tnz6t-config-tk4p5" Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.619040 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gkgmx\" (UniqueName: \"kubernetes.io/projected/4e7c7e4a-8aad-4693-81de-0029a90c48ac-kube-api-access-gkgmx\") pod \"ovn-controller-tnz6t-config-tk4p5\" (UID: \"4e7c7e4a-8aad-4693-81de-0029a90c48ac\") " pod="openstack/ovn-controller-tnz6t-config-tk4p5" Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.619119 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/4e7c7e4a-8aad-4693-81de-0029a90c48ac-var-run\") pod \"ovn-controller-tnz6t-config-tk4p5\" (UID: \"4e7c7e4a-8aad-4693-81de-0029a90c48ac\") " pod="openstack/ovn-controller-tnz6t-config-tk4p5" Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.619242 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4e7c7e4a-8aad-4693-81de-0029a90c48ac-scripts\") pod \"ovn-controller-tnz6t-config-tk4p5\" (UID: \"4e7c7e4a-8aad-4693-81de-0029a90c48ac\") " pod="openstack/ovn-controller-tnz6t-config-tk4p5" Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.619380 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/4e7c7e4a-8aad-4693-81de-0029a90c48ac-var-log-ovn\") pod \"ovn-controller-tnz6t-config-tk4p5\" (UID: \"4e7c7e4a-8aad-4693-81de-0029a90c48ac\") " pod="openstack/ovn-controller-tnz6t-config-tk4p5" Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.619458 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/4e7c7e4a-8aad-4693-81de-0029a90c48ac-var-run-ovn\") pod \"ovn-controller-tnz6t-config-tk4p5\" (UID: \"4e7c7e4a-8aad-4693-81de-0029a90c48ac\") " pod="openstack/ovn-controller-tnz6t-config-tk4p5" Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.722538 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/4e7c7e4a-8aad-4693-81de-0029a90c48ac-additional-scripts\") pod \"ovn-controller-tnz6t-config-tk4p5\" (UID: \"4e7c7e4a-8aad-4693-81de-0029a90c48ac\") " pod="openstack/ovn-controller-tnz6t-config-tk4p5" Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.722632 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gkgmx\" (UniqueName: 
\"kubernetes.io/projected/4e7c7e4a-8aad-4693-81de-0029a90c48ac-kube-api-access-gkgmx\") pod \"ovn-controller-tnz6t-config-tk4p5\" (UID: \"4e7c7e4a-8aad-4693-81de-0029a90c48ac\") " pod="openstack/ovn-controller-tnz6t-config-tk4p5" Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.722662 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/4e7c7e4a-8aad-4693-81de-0029a90c48ac-var-run\") pod \"ovn-controller-tnz6t-config-tk4p5\" (UID: \"4e7c7e4a-8aad-4693-81de-0029a90c48ac\") " pod="openstack/ovn-controller-tnz6t-config-tk4p5" Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.722697 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4e7c7e4a-8aad-4693-81de-0029a90c48ac-scripts\") pod \"ovn-controller-tnz6t-config-tk4p5\" (UID: \"4e7c7e4a-8aad-4693-81de-0029a90c48ac\") " pod="openstack/ovn-controller-tnz6t-config-tk4p5" Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.722737 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/4e7c7e4a-8aad-4693-81de-0029a90c48ac-var-log-ovn\") pod \"ovn-controller-tnz6t-config-tk4p5\" (UID: \"4e7c7e4a-8aad-4693-81de-0029a90c48ac\") " pod="openstack/ovn-controller-tnz6t-config-tk4p5" Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.722765 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/4e7c7e4a-8aad-4693-81de-0029a90c48ac-var-run-ovn\") pod \"ovn-controller-tnz6t-config-tk4p5\" (UID: \"4e7c7e4a-8aad-4693-81de-0029a90c48ac\") " pod="openstack/ovn-controller-tnz6t-config-tk4p5" Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.723094 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/4e7c7e4a-8aad-4693-81de-0029a90c48ac-var-run-ovn\") pod \"ovn-controller-tnz6t-config-tk4p5\" (UID: \"4e7c7e4a-8aad-4693-81de-0029a90c48ac\") " pod="openstack/ovn-controller-tnz6t-config-tk4p5" Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.723118 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/4e7c7e4a-8aad-4693-81de-0029a90c48ac-var-run\") pod \"ovn-controller-tnz6t-config-tk4p5\" (UID: \"4e7c7e4a-8aad-4693-81de-0029a90c48ac\") " pod="openstack/ovn-controller-tnz6t-config-tk4p5" Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.723243 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/4e7c7e4a-8aad-4693-81de-0029a90c48ac-var-log-ovn\") pod \"ovn-controller-tnz6t-config-tk4p5\" (UID: \"4e7c7e4a-8aad-4693-81de-0029a90c48ac\") " pod="openstack/ovn-controller-tnz6t-config-tk4p5" Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.723508 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/4e7c7e4a-8aad-4693-81de-0029a90c48ac-additional-scripts\") pod \"ovn-controller-tnz6t-config-tk4p5\" (UID: \"4e7c7e4a-8aad-4693-81de-0029a90c48ac\") " pod="openstack/ovn-controller-tnz6t-config-tk4p5" Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.725377 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/4e7c7e4a-8aad-4693-81de-0029a90c48ac-scripts\") pod \"ovn-controller-tnz6t-config-tk4p5\" (UID: \"4e7c7e4a-8aad-4693-81de-0029a90c48ac\") " pod="openstack/ovn-controller-tnz6t-config-tk4p5" Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.757419 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gkgmx\" (UniqueName: \"kubernetes.io/projected/4e7c7e4a-8aad-4693-81de-0029a90c48ac-kube-api-access-gkgmx\") pod \"ovn-controller-tnz6t-config-tk4p5\" (UID: \"4e7c7e4a-8aad-4693-81de-0029a90c48ac\") " pod="openstack/ovn-controller-tnz6t-config-tk4p5" Nov 25 10:05:46 crc kubenswrapper[4769]: I1125 10:05:46.819545 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-tnz6t-config-tk4p5" Nov 25 10:05:47 crc kubenswrapper[4769]: I1125 10:05:47.222466 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Nov 25 10:05:47 crc kubenswrapper[4769]: I1125 10:05:47.226177 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Nov 25 10:05:48 crc kubenswrapper[4769]: I1125 10:05:48.036764 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Nov 25 10:05:48 crc kubenswrapper[4769]: I1125 10:05:48.255454 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b7f14f6-2e75-4bf4-9c6c-3e3599047d02" path="/var/lib/kubelet/pods/5b7f14f6-2e75-4bf4-9c6c-3e3599047d02/volumes" Nov 25 10:05:51 crc kubenswrapper[4769]: I1125 10:05:51.050178 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 25 10:05:51 crc kubenswrapper[4769]: I1125 10:05:51.050859 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7" containerName="prometheus" containerID="cri-o://be4b19b044f5a9e7a9f93745ce27d7018083d4e1a0a9385de3b6fb62e1968c17" gracePeriod=600 Nov 25 10:05:51 crc kubenswrapper[4769]: I1125 10:05:51.051017 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7" containerName="thanos-sidecar" containerID="cri-o://fd15f4c6c259b869ec7062dcb9d286aec188d93849849584f7ec7ab2cedcd01d" gracePeriod=600 Nov 25 10:05:51 crc kubenswrapper[4769]: I1125 10:05:51.051055 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7" containerName="config-reloader" containerID="cri-o://8e001632f7acf9d0ae2e2b9606945759d0ddd5febb51566939358a5d94eba13a" gracePeriod=600 Nov 25 10:05:52 crc kubenswrapper[4769]: I1125 10:05:52.080849 4769 generic.go:334] "Generic (PLEG): container finished" podID="e5719295-a2f1-4d2f-a021-1da5411acbc8" containerID="096dec0bff515f769bec47c9874a06e3620cb9075a0534fbd7b6e776e814dcac" exitCode=0 Nov 25 10:05:52 crc kubenswrapper[4769]: I1125 10:05:52.080936 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-kt2fl" event={"ID":"e5719295-a2f1-4d2f-a021-1da5411acbc8","Type":"ContainerDied","Data":"096dec0bff515f769bec47c9874a06e3620cb9075a0534fbd7b6e776e814dcac"} Nov 25 10:05:52 crc kubenswrapper[4769]: I1125 10:05:52.085928 4769 generic.go:334] "Generic (PLEG): container finished" 
podID="3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7" containerID="fd15f4c6c259b869ec7062dcb9d286aec188d93849849584f7ec7ab2cedcd01d" exitCode=0 Nov 25 10:05:52 crc kubenswrapper[4769]: I1125 10:05:52.085953 4769 generic.go:334] "Generic (PLEG): container finished" podID="3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7" containerID="8e001632f7acf9d0ae2e2b9606945759d0ddd5febb51566939358a5d94eba13a" exitCode=0 Nov 25 10:05:52 crc kubenswrapper[4769]: I1125 10:05:52.085983 4769 generic.go:334] "Generic (PLEG): container finished" podID="3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7" containerID="be4b19b044f5a9e7a9f93745ce27d7018083d4e1a0a9385de3b6fb62e1968c17" exitCode=0 Nov 25 10:05:52 crc kubenswrapper[4769]: I1125 10:05:52.086004 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7","Type":"ContainerDied","Data":"fd15f4c6c259b869ec7062dcb9d286aec188d93849849584f7ec7ab2cedcd01d"} Nov 25 10:05:52 crc kubenswrapper[4769]: I1125 10:05:52.086024 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7","Type":"ContainerDied","Data":"8e001632f7acf9d0ae2e2b9606945759d0ddd5febb51566939358a5d94eba13a"} Nov 25 10:05:52 crc kubenswrapper[4769]: I1125 10:05:52.086038 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7","Type":"ContainerDied","Data":"be4b19b044f5a9e7a9f93745ce27d7018083d4e1a0a9385de3b6fb62e1968c17"} Nov 25 10:05:52 crc kubenswrapper[4769]: I1125 10:05:52.217718 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/prometheus-metric-storage-0" podUID="3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7" containerName="prometheus" probeResult="failure" output="Get \"http://10.217.0.140:9090/-/ready\": dial tcp 10.217.0.140:9090: connect: connection refused" Nov 25 10:05:52 crc kubenswrapper[4769]: I1125 10:05:52.290516 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:05:52 crc kubenswrapper[4769]: I1125 10:05:52.290642 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:05:54 crc kubenswrapper[4769]: I1125 10:05:54.151793 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-kt2fl" event={"ID":"e5719295-a2f1-4d2f-a021-1da5411acbc8","Type":"ContainerDied","Data":"ce4bd62f83937bd24de5863470058c9e11e0f89c573fb87b85306125a67c3ccf"} Nov 25 10:05:54 crc kubenswrapper[4769]: I1125 10:05:54.152475 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ce4bd62f83937bd24de5863470058c9e11e0f89c573fb87b85306125a67c3ccf" Nov 25 10:05:54 crc kubenswrapper[4769]: I1125 10:05:54.156264 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-kt2fl" Nov 25 10:05:54 crc kubenswrapper[4769]: I1125 10:05:54.221799 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hcdfk\" (UniqueName: \"kubernetes.io/projected/e5719295-a2f1-4d2f-a021-1da5411acbc8-kube-api-access-hcdfk\") pod \"e5719295-a2f1-4d2f-a021-1da5411acbc8\" (UID: \"e5719295-a2f1-4d2f-a021-1da5411acbc8\") " Nov 25 10:05:54 crc kubenswrapper[4769]: I1125 10:05:54.222613 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5719295-a2f1-4d2f-a021-1da5411acbc8-config-data\") pod \"e5719295-a2f1-4d2f-a021-1da5411acbc8\" (UID: \"e5719295-a2f1-4d2f-a021-1da5411acbc8\") " Nov 25 10:05:54 crc kubenswrapper[4769]: I1125 10:05:54.222656 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5719295-a2f1-4d2f-a021-1da5411acbc8-combined-ca-bundle\") pod \"e5719295-a2f1-4d2f-a021-1da5411acbc8\" (UID: \"e5719295-a2f1-4d2f-a021-1da5411acbc8\") " Nov 25 10:05:54 crc kubenswrapper[4769]: I1125 10:05:54.230414 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e5719295-a2f1-4d2f-a021-1da5411acbc8-kube-api-access-hcdfk" (OuterVolumeSpecName: "kube-api-access-hcdfk") pod "e5719295-a2f1-4d2f-a021-1da5411acbc8" (UID: "e5719295-a2f1-4d2f-a021-1da5411acbc8"). InnerVolumeSpecName "kube-api-access-hcdfk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:05:54 crc kubenswrapper[4769]: I1125 10:05:54.270258 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5719295-a2f1-4d2f-a021-1da5411acbc8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e5719295-a2f1-4d2f-a021-1da5411acbc8" (UID: "e5719295-a2f1-4d2f-a021-1da5411acbc8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:05:54 crc kubenswrapper[4769]: I1125 10:05:54.313653 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5719295-a2f1-4d2f-a021-1da5411acbc8-config-data" (OuterVolumeSpecName: "config-data") pod "e5719295-a2f1-4d2f-a021-1da5411acbc8" (UID: "e5719295-a2f1-4d2f-a021-1da5411acbc8"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:05:54 crc kubenswrapper[4769]: I1125 10:05:54.326509 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5719295-a2f1-4d2f-a021-1da5411acbc8-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:54 crc kubenswrapper[4769]: I1125 10:05:54.326540 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5719295-a2f1-4d2f-a021-1da5411acbc8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:54 crc kubenswrapper[4769]: I1125 10:05:54.326555 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hcdfk\" (UniqueName: \"kubernetes.io/projected/e5719295-a2f1-4d2f-a021-1da5411acbc8-kube-api-access-hcdfk\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:54 crc kubenswrapper[4769]: I1125 10:05:54.658692 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-tnz6t-config-tk4p5"] Nov 25 10:05:54 crc kubenswrapper[4769]: I1125 10:05:54.849382 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 25 10:05:54 crc kubenswrapper[4769]: I1125 10:05:54.941728 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7-prometheus-metric-storage-rulefiles-0\") pod \"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7\" (UID: \"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7\") " Nov 25 10:05:54 crc kubenswrapper[4769]: I1125 10:05:54.942274 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7-config\") pod \"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7\" (UID: \"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7\") " Nov 25 10:05:54 crc kubenswrapper[4769]: I1125 10:05:54.942401 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7-config-out\") pod \"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7\" (UID: \"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7\") " Nov 25 10:05:54 crc kubenswrapper[4769]: I1125 10:05:54.942459 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vdhdk\" (UniqueName: \"kubernetes.io/projected/3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7-kube-api-access-vdhdk\") pod \"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7\" (UID: \"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7\") " Nov 25 10:05:54 crc kubenswrapper[4769]: I1125 10:05:54.942597 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-77d4796a-3cc8-4516-b1f4-89b6f6883977\") pod \"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7\" (UID: \"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7\") " Nov 25 10:05:54 crc kubenswrapper[4769]: I1125 10:05:54.942673 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7-web-config\") pod \"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7\" (UID: \"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7\") " Nov 25 10:05:54 crc kubenswrapper[4769]: I1125 10:05:54.942730 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"tls-assets\" (UniqueName: \"kubernetes.io/projected/3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7-tls-assets\") pod \"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7\" (UID: \"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7\") " Nov 25 10:05:54 crc kubenswrapper[4769]: I1125 10:05:54.942766 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7-thanos-prometheus-http-client-file\") pod \"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7\" (UID: \"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7\") " Nov 25 10:05:54 crc kubenswrapper[4769]: I1125 10:05:54.942826 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7" (UID: "3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:05:54 crc kubenswrapper[4769]: I1125 10:05:54.943274 4769 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:54 crc kubenswrapper[4769]: I1125 10:05:54.948407 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7" (UID: "3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7"). InnerVolumeSpecName "thanos-prometheus-http-client-file". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:05:54 crc kubenswrapper[4769]: I1125 10:05:54.950226 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7-config" (OuterVolumeSpecName: "config") pod "3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7" (UID: "3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:05:54 crc kubenswrapper[4769]: I1125 10:05:54.952125 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7-config-out" (OuterVolumeSpecName: "config-out") pod "3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7" (UID: "3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7"). InnerVolumeSpecName "config-out". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:05:54 crc kubenswrapper[4769]: I1125 10:05:54.955355 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7-kube-api-access-vdhdk" (OuterVolumeSpecName: "kube-api-access-vdhdk") pod "3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7" (UID: "3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7"). InnerVolumeSpecName "kube-api-access-vdhdk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:05:54 crc kubenswrapper[4769]: I1125 10:05:54.955417 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7" (UID: "3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7"). InnerVolumeSpecName "tls-assets". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:05:54 crc kubenswrapper[4769]: I1125 10:05:54.974712 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-77d4796a-3cc8-4516-b1f4-89b6f6883977" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7" (UID: "3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7"). InnerVolumeSpecName "pvc-77d4796a-3cc8-4516-b1f4-89b6f6883977". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.008628 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7-web-config" (OuterVolumeSpecName: "web-config") pod "3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7" (UID: "3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7"). InnerVolumeSpecName "web-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.047876 4769 reconciler_common.go:293] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7-config-out\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.047920 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vdhdk\" (UniqueName: \"kubernetes.io/projected/3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7-kube-api-access-vdhdk\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.047980 4769 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-77d4796a-3cc8-4516-b1f4-89b6f6883977\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-77d4796a-3cc8-4516-b1f4-89b6f6883977\") on node \"crc\" " Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.047992 4769 reconciler_common.go:293] "Volume detached for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7-web-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.048005 4769 reconciler_common.go:293] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7-tls-assets\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.048014 4769 reconciler_common.go:293] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.048028 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.097726 4769 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.097994 4769 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-77d4796a-3cc8-4516-b1f4-89b6f6883977" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-77d4796a-3cc8-4516-b1f4-89b6f6883977") on node "crc" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.150672 4769 reconciler_common.go:293] "Volume detached for volume \"pvc-77d4796a-3cc8-4516-b1f4-89b6f6883977\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-77d4796a-3cc8-4516-b1f4-89b6f6883977\") on node \"crc\" DevicePath \"\"" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.167196 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-jm2np" event={"ID":"28b97e59-606d-4810-bec5-cecbc1c691bc","Type":"ContainerStarted","Data":"b176b2b613fd303e61f4c5784dbcb2a06c08b8533cfc13538dd118b0c84b0a99"} Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.173459 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7","Type":"ContainerDied","Data":"b47fda3a9ca3e2463e0f5133548b16a13692519f2cde980e87301ac4180a5508"} Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.173506 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.173531 4769 scope.go:117] "RemoveContainer" containerID="fd15f4c6c259b869ec7062dcb9d286aec188d93849849584f7ec7ab2cedcd01d" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.173503 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-kt2fl" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.205751 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-jm2np" podStartSLOduration=6.589394603 podStartE2EDuration="22.205725773s" podCreationTimestamp="2025-11-25 10:05:33 +0000 UTC" firstStartedPulling="2025-11-25 10:05:38.555809875 +0000 UTC m=+1287.140782188" lastFinishedPulling="2025-11-25 10:05:54.172141035 +0000 UTC m=+1302.757113358" observedRunningTime="2025-11-25 10:05:55.187337864 +0000 UTC m=+1303.772310177" watchObservedRunningTime="2025-11-25 10:05:55.205725773 +0000 UTC m=+1303.790698086" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.247329 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.260217 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.272440 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 25 10:05:55 crc kubenswrapper[4769]: E1125 10:05:55.273091 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7" containerName="init-config-reloader" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.273117 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7" containerName="init-config-reloader" Nov 25 10:05:55 crc kubenswrapper[4769]: E1125 10:05:55.273145 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7" containerName="thanos-sidecar" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.273153 
4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7" containerName="thanos-sidecar" Nov 25 10:05:55 crc kubenswrapper[4769]: E1125 10:05:55.273174 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5719295-a2f1-4d2f-a021-1da5411acbc8" containerName="keystone-db-sync" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.273180 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5719295-a2f1-4d2f-a021-1da5411acbc8" containerName="keystone-db-sync" Nov 25 10:05:55 crc kubenswrapper[4769]: E1125 10:05:55.273201 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7" containerName="prometheus" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.273208 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7" containerName="prometheus" Nov 25 10:05:55 crc kubenswrapper[4769]: E1125 10:05:55.273229 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7" containerName="config-reloader" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.273234 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7" containerName="config-reloader" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.273487 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7" containerName="config-reloader" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.273506 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5719295-a2f1-4d2f-a021-1da5411acbc8" containerName="keystone-db-sync" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.273527 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7" containerName="thanos-sidecar" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.273546 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7" containerName="prometheus" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.276379 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.282856 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.283141 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.283505 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-rgbvj" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.284053 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.284345 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-metric-storage-prometheus-svc" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.284536 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.290845 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.292860 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.355676 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/8ef4c37f-e3dc-4994-92bf-b41e7c215ef1-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"8ef4c37f-e3dc-4994-92bf-b41e7c215ef1\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.355740 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/8ef4c37f-e3dc-4994-92bf-b41e7c215ef1-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"8ef4c37f-e3dc-4994-92bf-b41e7c215ef1\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.355772 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/8ef4c37f-e3dc-4994-92bf-b41e7c215ef1-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"8ef4c37f-e3dc-4994-92bf-b41e7c215ef1\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.355847 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q2kcc\" (UniqueName: \"kubernetes.io/projected/8ef4c37f-e3dc-4994-92bf-b41e7c215ef1-kube-api-access-q2kcc\") pod \"prometheus-metric-storage-0\" (UID: \"8ef4c37f-e3dc-4994-92bf-b41e7c215ef1\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.355883 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/secret/8ef4c37f-e3dc-4994-92bf-b41e7c215ef1-config\") pod \"prometheus-metric-storage-0\" (UID: \"8ef4c37f-e3dc-4994-92bf-b41e7c215ef1\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.355917 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/8ef4c37f-e3dc-4994-92bf-b41e7c215ef1-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"8ef4c37f-e3dc-4994-92bf-b41e7c215ef1\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.355934 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/8ef4c37f-e3dc-4994-92bf-b41e7c215ef1-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"8ef4c37f-e3dc-4994-92bf-b41e7c215ef1\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.356061 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/8ef4c37f-e3dc-4994-92bf-b41e7c215ef1-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"8ef4c37f-e3dc-4994-92bf-b41e7c215ef1\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.356098 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/8ef4c37f-e3dc-4994-92bf-b41e7c215ef1-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"8ef4c37f-e3dc-4994-92bf-b41e7c215ef1\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.356121 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-77d4796a-3cc8-4516-b1f4-89b6f6883977\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-77d4796a-3cc8-4516-b1f4-89b6f6883977\") pod \"prometheus-metric-storage-0\" (UID: \"8ef4c37f-e3dc-4994-92bf-b41e7c215ef1\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.356160 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ef4c37f-e3dc-4994-92bf-b41e7c215ef1-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"8ef4c37f-e3dc-4994-92bf-b41e7c215ef1\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.459642 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q2kcc\" (UniqueName: \"kubernetes.io/projected/8ef4c37f-e3dc-4994-92bf-b41e7c215ef1-kube-api-access-q2kcc\") pod \"prometheus-metric-storage-0\" (UID: \"8ef4c37f-e3dc-4994-92bf-b41e7c215ef1\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.459732 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/8ef4c37f-e3dc-4994-92bf-b41e7c215ef1-config\") pod \"prometheus-metric-storage-0\" (UID: \"8ef4c37f-e3dc-4994-92bf-b41e7c215ef1\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:05:55 crc 
kubenswrapper[4769]: I1125 10:05:55.459779 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/8ef4c37f-e3dc-4994-92bf-b41e7c215ef1-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"8ef4c37f-e3dc-4994-92bf-b41e7c215ef1\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.459802 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/8ef4c37f-e3dc-4994-92bf-b41e7c215ef1-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"8ef4c37f-e3dc-4994-92bf-b41e7c215ef1\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.459831 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/8ef4c37f-e3dc-4994-92bf-b41e7c215ef1-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"8ef4c37f-e3dc-4994-92bf-b41e7c215ef1\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.459865 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/8ef4c37f-e3dc-4994-92bf-b41e7c215ef1-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"8ef4c37f-e3dc-4994-92bf-b41e7c215ef1\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.459891 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-77d4796a-3cc8-4516-b1f4-89b6f6883977\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-77d4796a-3cc8-4516-b1f4-89b6f6883977\") pod \"prometheus-metric-storage-0\" (UID: \"8ef4c37f-e3dc-4994-92bf-b41e7c215ef1\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.459938 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ef4c37f-e3dc-4994-92bf-b41e7c215ef1-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"8ef4c37f-e3dc-4994-92bf-b41e7c215ef1\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.460001 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/8ef4c37f-e3dc-4994-92bf-b41e7c215ef1-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"8ef4c37f-e3dc-4994-92bf-b41e7c215ef1\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.460044 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/8ef4c37f-e3dc-4994-92bf-b41e7c215ef1-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"8ef4c37f-e3dc-4994-92bf-b41e7c215ef1\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.460073 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: 
\"kubernetes.io/secret/8ef4c37f-e3dc-4994-92bf-b41e7c215ef1-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"8ef4c37f-e3dc-4994-92bf-b41e7c215ef1\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.469417 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/8ef4c37f-e3dc-4994-92bf-b41e7c215ef1-config\") pod \"prometheus-metric-storage-0\" (UID: \"8ef4c37f-e3dc-4994-92bf-b41e7c215ef1\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.475844 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-f877ddd87-ttfzd"] Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.478324 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/8ef4c37f-e3dc-4994-92bf-b41e7c215ef1-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"8ef4c37f-e3dc-4994-92bf-b41e7c215ef1\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.478980 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/8ef4c37f-e3dc-4994-92bf-b41e7c215ef1-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"8ef4c37f-e3dc-4994-92bf-b41e7c215ef1\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.485654 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f877ddd87-ttfzd" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.490397 4769 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.490446 4769 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-77d4796a-3cc8-4516-b1f4-89b6f6883977\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-77d4796a-3cc8-4516-b1f4-89b6f6883977\") pod \"prometheus-metric-storage-0\" (UID: \"8ef4c37f-e3dc-4994-92bf-b41e7c215ef1\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/e301c5bbf2eab765e231bfc88a124cfef4fd657b7d84a5151dae63c839ee7d53/globalmount\"" pod="openstack/prometheus-metric-storage-0" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.495938 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/8ef4c37f-e3dc-4994-92bf-b41e7c215ef1-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"8ef4c37f-e3dc-4994-92bf-b41e7c215ef1\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.496270 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ef4c37f-e3dc-4994-92bf-b41e7c215ef1-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"8ef4c37f-e3dc-4994-92bf-b41e7c215ef1\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.497852 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/8ef4c37f-e3dc-4994-92bf-b41e7c215ef1-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"8ef4c37f-e3dc-4994-92bf-b41e7c215ef1\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.498573 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/8ef4c37f-e3dc-4994-92bf-b41e7c215ef1-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"8ef4c37f-e3dc-4994-92bf-b41e7c215ef1\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.509222 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q2kcc\" (UniqueName: \"kubernetes.io/projected/8ef4c37f-e3dc-4994-92bf-b41e7c215ef1-kube-api-access-q2kcc\") pod \"prometheus-metric-storage-0\" (UID: \"8ef4c37f-e3dc-4994-92bf-b41e7c215ef1\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.516438 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/8ef4c37f-e3dc-4994-92bf-b41e7c215ef1-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"8ef4c37f-e3dc-4994-92bf-b41e7c215ef1\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.519880 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/8ef4c37f-e3dc-4994-92bf-b41e7c215ef1-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"8ef4c37f-e3dc-4994-92bf-b41e7c215ef1\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.544921 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f877ddd87-ttfzd"] 
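The pattern repeating throughout this window — VerifyControllerAttachedVolume, then MountVolume started, then MountVolume.SetUp succeeded on pod creation, and UnmountVolume/TearDown followed by "Volume detached" on deletion — is the kubelet's volume manager reconciling desired state (volumes required by pods assigned to the node) against actual state (what is currently mounted). A deliberately simplified model of that loop, with hypothetical types; the real reconciler lives in the kubelet's volumemanager package and additionally handles attach/detach, device staging, and retry with backoff.

    package reconcile

    import "fmt"

    // volume is a toy stand-in for the kubelet's volume spec; UniqueName
    // corresponds to the UniqueName field printed in the log entries.
    type volume struct {
    	UniqueName string
    	PodUID     string
    }

    // reconcile brings actual in line with desired, mirroring the log's
    // ordering: tear down what is no longer desired, then mount what is missing.
    func reconcile(desired, actual map[string]volume,
    	mount func(volume) error, unmount func(volume) error) error {

    	for name, v := range actual {
    		if _, ok := desired[name]; !ok {
    			// cf. "operationExecutor.UnmountVolume started ..." above
    			if err := unmount(v); err != nil {
    				return fmt.Errorf("unmount %s: %w", name, err)
    			}
    			delete(actual, name)
    		}
    	}
    	for name, v := range desired {
    		if _, ok := actual[name]; !ok {
    			// cf. "operationExecutor.MountVolume started ..." followed by
    			// "MountVolume.SetUp succeeded ..." above
    			if err := mount(v); err != nil {
    				return fmt.Errorf("mount %s: %w", name, err)
    			}
    			actual[name] = v
    		}
    	}
    	return nil
    }

Unmount-before-mount matches the sequence visible above, where the deleted prometheus-metric-storage-0 pod's volumes (UID 3fcb6ab7-...) are torn down before the replacement pod's volumes (UID 8ef4c37f-...) are verified and set up.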
Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.570209 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1-config\") pod \"dnsmasq-dns-f877ddd87-ttfzd\" (UID: \"a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1\") " pod="openstack/dnsmasq-dns-f877ddd87-ttfzd" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.570292 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1-dns-svc\") pod \"dnsmasq-dns-f877ddd87-ttfzd\" (UID: \"a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1\") " pod="openstack/dnsmasq-dns-f877ddd87-ttfzd" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.570372 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1-ovsdbserver-nb\") pod \"dnsmasq-dns-f877ddd87-ttfzd\" (UID: \"a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1\") " pod="openstack/dnsmasq-dns-f877ddd87-ttfzd" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.570461 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h8zxj\" (UniqueName: \"kubernetes.io/projected/a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1-kube-api-access-h8zxj\") pod \"dnsmasq-dns-f877ddd87-ttfzd\" (UID: \"a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1\") " pod="openstack/dnsmasq-dns-f877ddd87-ttfzd" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.570498 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1-ovsdbserver-sb\") pod \"dnsmasq-dns-f877ddd87-ttfzd\" (UID: \"a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1\") " pod="openstack/dnsmasq-dns-f877ddd87-ttfzd" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.591938 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-vbhsj"] Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.600431 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-vbhsj" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.602333 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.604948 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.605111 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.605286 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-bnc4t" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.605993 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.636285 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-vbhsj"] Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.674355 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h8zxj\" (UniqueName: \"kubernetes.io/projected/a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1-kube-api-access-h8zxj\") pod \"dnsmasq-dns-f877ddd87-ttfzd\" (UID: \"a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1\") " pod="openstack/dnsmasq-dns-f877ddd87-ttfzd" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.674425 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/fa5947eb-7a68-40d9-8ea3-1dd6719dee8d-fernet-keys\") pod \"keystone-bootstrap-vbhsj\" (UID: \"fa5947eb-7a68-40d9-8ea3-1dd6719dee8d\") " pod="openstack/keystone-bootstrap-vbhsj" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.674459 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1-ovsdbserver-sb\") pod \"dnsmasq-dns-f877ddd87-ttfzd\" (UID: \"a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1\") " pod="openstack/dnsmasq-dns-f877ddd87-ttfzd" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.674515 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1-config\") pod \"dnsmasq-dns-f877ddd87-ttfzd\" (UID: \"a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1\") " pod="openstack/dnsmasq-dns-f877ddd87-ttfzd" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.674543 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa5947eb-7a68-40d9-8ea3-1dd6719dee8d-config-data\") pod \"keystone-bootstrap-vbhsj\" (UID: \"fa5947eb-7a68-40d9-8ea3-1dd6719dee8d\") " pod="openstack/keystone-bootstrap-vbhsj" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.674574 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7sn8m\" (UniqueName: \"kubernetes.io/projected/fa5947eb-7a68-40d9-8ea3-1dd6719dee8d-kube-api-access-7sn8m\") pod \"keystone-bootstrap-vbhsj\" (UID: \"fa5947eb-7a68-40d9-8ea3-1dd6719dee8d\") " pod="openstack/keystone-bootstrap-vbhsj" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.674624 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" 
(UniqueName: \"kubernetes.io/configmap/a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1-dns-svc\") pod \"dnsmasq-dns-f877ddd87-ttfzd\" (UID: \"a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1\") " pod="openstack/dnsmasq-dns-f877ddd87-ttfzd" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.674674 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/fa5947eb-7a68-40d9-8ea3-1dd6719dee8d-credential-keys\") pod \"keystone-bootstrap-vbhsj\" (UID: \"fa5947eb-7a68-40d9-8ea3-1dd6719dee8d\") " pod="openstack/keystone-bootstrap-vbhsj" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.674718 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1-ovsdbserver-nb\") pod \"dnsmasq-dns-f877ddd87-ttfzd\" (UID: \"a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1\") " pod="openstack/dnsmasq-dns-f877ddd87-ttfzd" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.674742 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fa5947eb-7a68-40d9-8ea3-1dd6719dee8d-scripts\") pod \"keystone-bootstrap-vbhsj\" (UID: \"fa5947eb-7a68-40d9-8ea3-1dd6719dee8d\") " pod="openstack/keystone-bootstrap-vbhsj" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.674772 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa5947eb-7a68-40d9-8ea3-1dd6719dee8d-combined-ca-bundle\") pod \"keystone-bootstrap-vbhsj\" (UID: \"fa5947eb-7a68-40d9-8ea3-1dd6719dee8d\") " pod="openstack/keystone-bootstrap-vbhsj" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.686243 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1-dns-svc\") pod \"dnsmasq-dns-f877ddd87-ttfzd\" (UID: \"a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1\") " pod="openstack/dnsmasq-dns-f877ddd87-ttfzd" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.686767 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1-config\") pod \"dnsmasq-dns-f877ddd87-ttfzd\" (UID: \"a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1\") " pod="openstack/dnsmasq-dns-f877ddd87-ttfzd" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.687270 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1-ovsdbserver-nb\") pod \"dnsmasq-dns-f877ddd87-ttfzd\" (UID: \"a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1\") " pod="openstack/dnsmasq-dns-f877ddd87-ttfzd" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.690816 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1-ovsdbserver-sb\") pod \"dnsmasq-dns-f877ddd87-ttfzd\" (UID: \"a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1\") " pod="openstack/dnsmasq-dns-f877ddd87-ttfzd" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.690908 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-sync-5tt9b"] Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.697632 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-sync-5tt9b" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.708187 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-sc4ww" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.708455 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.709693 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h8zxj\" (UniqueName: \"kubernetes.io/projected/a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1-kube-api-access-h8zxj\") pod \"dnsmasq-dns-f877ddd87-ttfzd\" (UID: \"a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1\") " pod="openstack/dnsmasq-dns-f877ddd87-ttfzd" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.721495 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-77d4796a-3cc8-4516-b1f4-89b6f6883977\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-77d4796a-3cc8-4516-b1f4-89b6f6883977\") pod \"prometheus-metric-storage-0\" (UID: \"8ef4c37f-e3dc-4994-92bf-b41e7c215ef1\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.753213 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-5tt9b"] Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.778790 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/639faafa-26cf-4b2e-831b-bc95e327cb3b-combined-ca-bundle\") pod \"heat-db-sync-5tt9b\" (UID: \"639faafa-26cf-4b2e-831b-bc95e327cb3b\") " pod="openstack/heat-db-sync-5tt9b" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.795274 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/fa5947eb-7a68-40d9-8ea3-1dd6719dee8d-credential-keys\") pod \"keystone-bootstrap-vbhsj\" (UID: \"fa5947eb-7a68-40d9-8ea3-1dd6719dee8d\") " pod="openstack/keystone-bootstrap-vbhsj" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.795546 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fa5947eb-7a68-40d9-8ea3-1dd6719dee8d-scripts\") pod \"keystone-bootstrap-vbhsj\" (UID: \"fa5947eb-7a68-40d9-8ea3-1dd6719dee8d\") " pod="openstack/keystone-bootstrap-vbhsj" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.795635 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa5947eb-7a68-40d9-8ea3-1dd6719dee8d-combined-ca-bundle\") pod \"keystone-bootstrap-vbhsj\" (UID: \"fa5947eb-7a68-40d9-8ea3-1dd6719dee8d\") " pod="openstack/keystone-bootstrap-vbhsj" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.795774 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5m4h2\" (UniqueName: \"kubernetes.io/projected/639faafa-26cf-4b2e-831b-bc95e327cb3b-kube-api-access-5m4h2\") pod \"heat-db-sync-5tt9b\" (UID: \"639faafa-26cf-4b2e-831b-bc95e327cb3b\") " pod="openstack/heat-db-sync-5tt9b" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.795945 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/639faafa-26cf-4b2e-831b-bc95e327cb3b-config-data\") pod 
\"heat-db-sync-5tt9b\" (UID: \"639faafa-26cf-4b2e-831b-bc95e327cb3b\") " pod="openstack/heat-db-sync-5tt9b" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.796101 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/fa5947eb-7a68-40d9-8ea3-1dd6719dee8d-fernet-keys\") pod \"keystone-bootstrap-vbhsj\" (UID: \"fa5947eb-7a68-40d9-8ea3-1dd6719dee8d\") " pod="openstack/keystone-bootstrap-vbhsj" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.796274 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa5947eb-7a68-40d9-8ea3-1dd6719dee8d-config-data\") pod \"keystone-bootstrap-vbhsj\" (UID: \"fa5947eb-7a68-40d9-8ea3-1dd6719dee8d\") " pod="openstack/keystone-bootstrap-vbhsj" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.796309 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7sn8m\" (UniqueName: \"kubernetes.io/projected/fa5947eb-7a68-40d9-8ea3-1dd6719dee8d-kube-api-access-7sn8m\") pod \"keystone-bootstrap-vbhsj\" (UID: \"fa5947eb-7a68-40d9-8ea3-1dd6719dee8d\") " pod="openstack/keystone-bootstrap-vbhsj" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.824508 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-lfvzs"] Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.826286 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-lfvzs" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.830522 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.830813 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.831046 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-66n2x" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.844606 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/fa5947eb-7a68-40d9-8ea3-1dd6719dee8d-credential-keys\") pod \"keystone-bootstrap-vbhsj\" (UID: \"fa5947eb-7a68-40d9-8ea3-1dd6719dee8d\") " pod="openstack/keystone-bootstrap-vbhsj" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.845705 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/fa5947eb-7a68-40d9-8ea3-1dd6719dee8d-fernet-keys\") pod \"keystone-bootstrap-vbhsj\" (UID: \"fa5947eb-7a68-40d9-8ea3-1dd6719dee8d\") " pod="openstack/keystone-bootstrap-vbhsj" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.846422 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fa5947eb-7a68-40d9-8ea3-1dd6719dee8d-scripts\") pod \"keystone-bootstrap-vbhsj\" (UID: \"fa5947eb-7a68-40d9-8ea3-1dd6719dee8d\") " pod="openstack/keystone-bootstrap-vbhsj" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.848697 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa5947eb-7a68-40d9-8ea3-1dd6719dee8d-combined-ca-bundle\") pod \"keystone-bootstrap-vbhsj\" (UID: \"fa5947eb-7a68-40d9-8ea3-1dd6719dee8d\") " pod="openstack/keystone-bootstrap-vbhsj" Nov 25 
10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.851753 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7sn8m\" (UniqueName: \"kubernetes.io/projected/fa5947eb-7a68-40d9-8ea3-1dd6719dee8d-kube-api-access-7sn8m\") pod \"keystone-bootstrap-vbhsj\" (UID: \"fa5947eb-7a68-40d9-8ea3-1dd6719dee8d\") " pod="openstack/keystone-bootstrap-vbhsj" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.854051 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-lfvzs"] Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.857743 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa5947eb-7a68-40d9-8ea3-1dd6719dee8d-config-data\") pod \"keystone-bootstrap-vbhsj\" (UID: \"fa5947eb-7a68-40d9-8ea3-1dd6719dee8d\") " pod="openstack/keystone-bootstrap-vbhsj" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.911637 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ed79e867-002b-4591-b757-0410b73a43ef-scripts\") pod \"cinder-db-sync-lfvzs\" (UID: \"ed79e867-002b-4591-b757-0410b73a43ef\") " pod="openstack/cinder-db-sync-lfvzs" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.911710 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5m4h2\" (UniqueName: \"kubernetes.io/projected/639faafa-26cf-4b2e-831b-bc95e327cb3b-kube-api-access-5m4h2\") pod \"heat-db-sync-5tt9b\" (UID: \"639faafa-26cf-4b2e-831b-bc95e327cb3b\") " pod="openstack/heat-db-sync-5tt9b" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.911740 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed79e867-002b-4591-b757-0410b73a43ef-combined-ca-bundle\") pod \"cinder-db-sync-lfvzs\" (UID: \"ed79e867-002b-4591-b757-0410b73a43ef\") " pod="openstack/cinder-db-sync-lfvzs" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.911834 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/639faafa-26cf-4b2e-831b-bc95e327cb3b-config-data\") pod \"heat-db-sync-5tt9b\" (UID: \"639faafa-26cf-4b2e-831b-bc95e327cb3b\") " pod="openstack/heat-db-sync-5tt9b" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.911982 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ed79e867-002b-4591-b757-0410b73a43ef-etc-machine-id\") pod \"cinder-db-sync-lfvzs\" (UID: \"ed79e867-002b-4591-b757-0410b73a43ef\") " pod="openstack/cinder-db-sync-lfvzs" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.912130 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/639faafa-26cf-4b2e-831b-bc95e327cb3b-combined-ca-bundle\") pod \"heat-db-sync-5tt9b\" (UID: \"639faafa-26cf-4b2e-831b-bc95e327cb3b\") " pod="openstack/heat-db-sync-5tt9b" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.912210 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ed79e867-002b-4591-b757-0410b73a43ef-db-sync-config-data\") pod \"cinder-db-sync-lfvzs\" (UID: \"ed79e867-002b-4591-b757-0410b73a43ef\") " 
pod="openstack/cinder-db-sync-lfvzs" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.912447 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed79e867-002b-4591-b757-0410b73a43ef-config-data\") pod \"cinder-db-sync-lfvzs\" (UID: \"ed79e867-002b-4591-b757-0410b73a43ef\") " pod="openstack/cinder-db-sync-lfvzs" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.912577 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9rbnp\" (UniqueName: \"kubernetes.io/projected/ed79e867-002b-4591-b757-0410b73a43ef-kube-api-access-9rbnp\") pod \"cinder-db-sync-lfvzs\" (UID: \"ed79e867-002b-4591-b757-0410b73a43ef\") " pod="openstack/cinder-db-sync-lfvzs" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.919114 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/639faafa-26cf-4b2e-831b-bc95e327cb3b-config-data\") pod \"heat-db-sync-5tt9b\" (UID: \"639faafa-26cf-4b2e-831b-bc95e327cb3b\") " pod="openstack/heat-db-sync-5tt9b" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.921214 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/639faafa-26cf-4b2e-831b-bc95e327cb3b-combined-ca-bundle\") pod \"heat-db-sync-5tt9b\" (UID: \"639faafa-26cf-4b2e-831b-bc95e327cb3b\") " pod="openstack/heat-db-sync-5tt9b" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.930309 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.945620 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f877ddd87-ttfzd" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.954409 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-vbhsj" Nov 25 10:05:55 crc kubenswrapper[4769]: I1125 10:05:55.971755 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f877ddd87-ttfzd"] Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:55.992255 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5m4h2\" (UniqueName: \"kubernetes.io/projected/639faafa-26cf-4b2e-831b-bc95e327cb3b-kube-api-access-5m4h2\") pod \"heat-db-sync-5tt9b\" (UID: \"639faafa-26cf-4b2e-831b-bc95e327cb3b\") " pod="openstack/heat-db-sync-5tt9b" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.016560 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ed79e867-002b-4591-b757-0410b73a43ef-scripts\") pod \"cinder-db-sync-lfvzs\" (UID: \"ed79e867-002b-4591-b757-0410b73a43ef\") " pod="openstack/cinder-db-sync-lfvzs" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.016609 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed79e867-002b-4591-b757-0410b73a43ef-combined-ca-bundle\") pod \"cinder-db-sync-lfvzs\" (UID: \"ed79e867-002b-4591-b757-0410b73a43ef\") " pod="openstack/cinder-db-sync-lfvzs" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.036132 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ed79e867-002b-4591-b757-0410b73a43ef-etc-machine-id\") pod \"cinder-db-sync-lfvzs\" (UID: \"ed79e867-002b-4591-b757-0410b73a43ef\") " pod="openstack/cinder-db-sync-lfvzs" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.036475 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ed79e867-002b-4591-b757-0410b73a43ef-db-sync-config-data\") pod \"cinder-db-sync-lfvzs\" (UID: \"ed79e867-002b-4591-b757-0410b73a43ef\") " pod="openstack/cinder-db-sync-lfvzs" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.036528 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed79e867-002b-4591-b757-0410b73a43ef-config-data\") pod \"cinder-db-sync-lfvzs\" (UID: \"ed79e867-002b-4591-b757-0410b73a43ef\") " pod="openstack/cinder-db-sync-lfvzs" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.036556 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9rbnp\" (UniqueName: \"kubernetes.io/projected/ed79e867-002b-4591-b757-0410b73a43ef-kube-api-access-9rbnp\") pod \"cinder-db-sync-lfvzs\" (UID: \"ed79e867-002b-4591-b757-0410b73a43ef\") " pod="openstack/cinder-db-sync-lfvzs" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.042376 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ed79e867-002b-4591-b757-0410b73a43ef-etc-machine-id\") pod \"cinder-db-sync-lfvzs\" (UID: \"ed79e867-002b-4591-b757-0410b73a43ef\") " pod="openstack/cinder-db-sync-lfvzs" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.053376 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-pvsgh"] Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.054911 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-pvsgh" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.056406 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-5tt9b" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.057796 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed79e867-002b-4591-b757-0410b73a43ef-combined-ca-bundle\") pod \"cinder-db-sync-lfvzs\" (UID: \"ed79e867-002b-4591-b757-0410b73a43ef\") " pod="openstack/cinder-db-sync-lfvzs" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.061135 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed79e867-002b-4591-b757-0410b73a43ef-config-data\") pod \"cinder-db-sync-lfvzs\" (UID: \"ed79e867-002b-4591-b757-0410b73a43ef\") " pod="openstack/cinder-db-sync-lfvzs" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.061635 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ed79e867-002b-4591-b757-0410b73a43ef-db-sync-config-data\") pod \"cinder-db-sync-lfvzs\" (UID: \"ed79e867-002b-4591-b757-0410b73a43ef\") " pod="openstack/cinder-db-sync-lfvzs" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.113003 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ed79e867-002b-4591-b757-0410b73a43ef-scripts\") pod \"cinder-db-sync-lfvzs\" (UID: \"ed79e867-002b-4591-b757-0410b73a43ef\") " pod="openstack/cinder-db-sync-lfvzs" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.113478 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9rbnp\" (UniqueName: \"kubernetes.io/projected/ed79e867-002b-4591-b757-0410b73a43ef-kube-api-access-9rbnp\") pod \"cinder-db-sync-lfvzs\" (UID: \"ed79e867-002b-4591-b757-0410b73a43ef\") " pod="openstack/cinder-db-sync-lfvzs" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.113950 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-6l42b" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.114447 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.196820 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-lfvzs" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.327330 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7" path="/var/lib/kubelet/pods/3fcb6ab7-bd77-4973-bfb1-fb46da1bd3d7/volumes" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.328557 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-9zbdp"] Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.348688 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-pvsgh"] Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.348734 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-9zbdp"] Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.348749 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-tnz6t-config-tk4p5" event={"ID":"4e7c7e4a-8aad-4693-81de-0029a90c48ac","Type":"ContainerStarted","Data":"a4fc419d0487c71b8041878737683002d852c5944c6c20daafa72b6cd319724d"} Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.348875 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-9zbdp" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.351878 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/29edc9fc-ffe1-4511-9f89-9d0795f063cb-scripts\") pod \"placement-db-sync-9zbdp\" (UID: \"29edc9fc-ffe1-4511-9f89-9d0795f063cb\") " pod="openstack/placement-db-sync-9zbdp" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.352048 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29edc9fc-ffe1-4511-9f89-9d0795f063cb-combined-ca-bundle\") pod \"placement-db-sync-9zbdp\" (UID: \"29edc9fc-ffe1-4511-9f89-9d0795f063cb\") " pod="openstack/placement-db-sync-9zbdp" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.352106 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/175271fe-6677-49b5-b497-c45ef1816fb7-db-sync-config-data\") pod \"barbican-db-sync-pvsgh\" (UID: \"175271fe-6677-49b5-b497-c45ef1816fb7\") " pod="openstack/barbican-db-sync-pvsgh" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.352190 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nw7t4\" (UniqueName: \"kubernetes.io/projected/175271fe-6677-49b5-b497-c45ef1816fb7-kube-api-access-nw7t4\") pod \"barbican-db-sync-pvsgh\" (UID: \"175271fe-6677-49b5-b497-c45ef1816fb7\") " pod="openstack/barbican-db-sync-pvsgh" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.352216 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29edc9fc-ffe1-4511-9f89-9d0795f063cb-config-data\") pod \"placement-db-sync-9zbdp\" (UID: \"29edc9fc-ffe1-4511-9f89-9d0795f063cb\") " pod="openstack/placement-db-sync-9zbdp" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.352259 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vvk5z\" (UniqueName: 
\"kubernetes.io/projected/29edc9fc-ffe1-4511-9f89-9d0795f063cb-kube-api-access-vvk5z\") pod \"placement-db-sync-9zbdp\" (UID: \"29edc9fc-ffe1-4511-9f89-9d0795f063cb\") " pod="openstack/placement-db-sync-9zbdp" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.352294 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/175271fe-6677-49b5-b497-c45ef1816fb7-combined-ca-bundle\") pod \"barbican-db-sync-pvsgh\" (UID: \"175271fe-6677-49b5-b497-c45ef1816fb7\") " pod="openstack/barbican-db-sync-pvsgh" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.352309 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/29edc9fc-ffe1-4511-9f89-9d0795f063cb-logs\") pod \"placement-db-sync-9zbdp\" (UID: \"29edc9fc-ffe1-4511-9f89-9d0795f063cb\") " pod="openstack/placement-db-sync-9zbdp" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.356514 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.356773 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.367953 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-zhjgb" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.415619 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-68dcc9cf6f-v9lcm"] Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.434374 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68dcc9cf6f-v9lcm" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.445891 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-jrgf9"] Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.450137 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-jrgf9" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.453881 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29edc9fc-ffe1-4511-9f89-9d0795f063cb-combined-ca-bundle\") pod \"placement-db-sync-9zbdp\" (UID: \"29edc9fc-ffe1-4511-9f89-9d0795f063cb\") " pod="openstack/placement-db-sync-9zbdp" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.453949 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/175271fe-6677-49b5-b497-c45ef1816fb7-db-sync-config-data\") pod \"barbican-db-sync-pvsgh\" (UID: \"175271fe-6677-49b5-b497-c45ef1816fb7\") " pod="openstack/barbican-db-sync-pvsgh" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.454019 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nw7t4\" (UniqueName: \"kubernetes.io/projected/175271fe-6677-49b5-b497-c45ef1816fb7-kube-api-access-nw7t4\") pod \"barbican-db-sync-pvsgh\" (UID: \"175271fe-6677-49b5-b497-c45ef1816fb7\") " pod="openstack/barbican-db-sync-pvsgh" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.454047 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29edc9fc-ffe1-4511-9f89-9d0795f063cb-config-data\") pod \"placement-db-sync-9zbdp\" (UID: \"29edc9fc-ffe1-4511-9f89-9d0795f063cb\") " pod="openstack/placement-db-sync-9zbdp" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.454081 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vvk5z\" (UniqueName: \"kubernetes.io/projected/29edc9fc-ffe1-4511-9f89-9d0795f063cb-kube-api-access-vvk5z\") pod \"placement-db-sync-9zbdp\" (UID: \"29edc9fc-ffe1-4511-9f89-9d0795f063cb\") " pod="openstack/placement-db-sync-9zbdp" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.454108 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/29edc9fc-ffe1-4511-9f89-9d0795f063cb-logs\") pod \"placement-db-sync-9zbdp\" (UID: \"29edc9fc-ffe1-4511-9f89-9d0795f063cb\") " pod="openstack/placement-db-sync-9zbdp" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.454125 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/175271fe-6677-49b5-b497-c45ef1816fb7-combined-ca-bundle\") pod \"barbican-db-sync-pvsgh\" (UID: \"175271fe-6677-49b5-b497-c45ef1816fb7\") " pod="openstack/barbican-db-sync-pvsgh" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.454179 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/29edc9fc-ffe1-4511-9f89-9d0795f063cb-scripts\") pod \"placement-db-sync-9zbdp\" (UID: \"29edc9fc-ffe1-4511-9f89-9d0795f063cb\") " pod="openstack/placement-db-sync-9zbdp" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.457785 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/29edc9fc-ffe1-4511-9f89-9d0795f063cb-logs\") pod \"placement-db-sync-9zbdp\" (UID: \"29edc9fc-ffe1-4511-9f89-9d0795f063cb\") " pod="openstack/placement-db-sync-9zbdp" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.463553 4769 reflector.go:368] Caches populated for 
*v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.463713 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29edc9fc-ffe1-4511-9f89-9d0795f063cb-combined-ca-bundle\") pod \"placement-db-sync-9zbdp\" (UID: \"29edc9fc-ffe1-4511-9f89-9d0795f063cb\") " pod="openstack/placement-db-sync-9zbdp" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.463856 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.464522 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-zhplb" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.466543 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/175271fe-6677-49b5-b497-c45ef1816fb7-combined-ca-bundle\") pod \"barbican-db-sync-pvsgh\" (UID: \"175271fe-6677-49b5-b497-c45ef1816fb7\") " pod="openstack/barbican-db-sync-pvsgh" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.467727 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29edc9fc-ffe1-4511-9f89-9d0795f063cb-config-data\") pod \"placement-db-sync-9zbdp\" (UID: \"29edc9fc-ffe1-4511-9f89-9d0795f063cb\") " pod="openstack/placement-db-sync-9zbdp" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.471592 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/29edc9fc-ffe1-4511-9f89-9d0795f063cb-scripts\") pod \"placement-db-sync-9zbdp\" (UID: \"29edc9fc-ffe1-4511-9f89-9d0795f063cb\") " pod="openstack/placement-db-sync-9zbdp" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.477627 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-68dcc9cf6f-v9lcm"] Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.479542 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/175271fe-6677-49b5-b497-c45ef1816fb7-db-sync-config-data\") pod \"barbican-db-sync-pvsgh\" (UID: \"175271fe-6677-49b5-b497-c45ef1816fb7\") " pod="openstack/barbican-db-sync-pvsgh" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.483371 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nw7t4\" (UniqueName: \"kubernetes.io/projected/175271fe-6677-49b5-b497-c45ef1816fb7-kube-api-access-nw7t4\") pod \"barbican-db-sync-pvsgh\" (UID: \"175271fe-6677-49b5-b497-c45ef1816fb7\") " pod="openstack/barbican-db-sync-pvsgh" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.484068 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-pvsgh" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.491423 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vvk5z\" (UniqueName: \"kubernetes.io/projected/29edc9fc-ffe1-4511-9f89-9d0795f063cb-kube-api-access-vvk5z\") pod \"placement-db-sync-9zbdp\" (UID: \"29edc9fc-ffe1-4511-9f89-9d0795f063cb\") " pod="openstack/placement-db-sync-9zbdp" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.507257 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-jrgf9"] Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.521464 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.541201 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.548610 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.548910 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.555969 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.559323 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r4lst\" (UniqueName: \"kubernetes.io/projected/e442b552-3d81-4d12-8d2c-8ef6c8b67ffd-kube-api-access-r4lst\") pod \"dnsmasq-dns-68dcc9cf6f-v9lcm\" (UID: \"e442b552-3d81-4d12-8d2c-8ef6c8b67ffd\") " pod="openstack/dnsmasq-dns-68dcc9cf6f-v9lcm" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.559404 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/5bb7cfa5-1e81-4d93-a15a-77260adec0d3-config\") pod \"neutron-db-sync-jrgf9\" (UID: \"5bb7cfa5-1e81-4d93-a15a-77260adec0d3\") " pod="openstack/neutron-db-sync-jrgf9" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.559433 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rfwkc\" (UniqueName: \"kubernetes.io/projected/5bb7cfa5-1e81-4d93-a15a-77260adec0d3-kube-api-access-rfwkc\") pod \"neutron-db-sync-jrgf9\" (UID: \"5bb7cfa5-1e81-4d93-a15a-77260adec0d3\") " pod="openstack/neutron-db-sync-jrgf9" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.559455 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e442b552-3d81-4d12-8d2c-8ef6c8b67ffd-dns-svc\") pod \"dnsmasq-dns-68dcc9cf6f-v9lcm\" (UID: \"e442b552-3d81-4d12-8d2c-8ef6c8b67ffd\") " pod="openstack/dnsmasq-dns-68dcc9cf6f-v9lcm" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.559498 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e442b552-3d81-4d12-8d2c-8ef6c8b67ffd-ovsdbserver-sb\") pod \"dnsmasq-dns-68dcc9cf6f-v9lcm\" (UID: \"e442b552-3d81-4d12-8d2c-8ef6c8b67ffd\") " pod="openstack/dnsmasq-dns-68dcc9cf6f-v9lcm" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.559526 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e442b552-3d81-4d12-8d2c-8ef6c8b67ffd-ovsdbserver-nb\") pod \"dnsmasq-dns-68dcc9cf6f-v9lcm\" (UID: \"e442b552-3d81-4d12-8d2c-8ef6c8b67ffd\") " pod="openstack/dnsmasq-dns-68dcc9cf6f-v9lcm" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.559569 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5bb7cfa5-1e81-4d93-a15a-77260adec0d3-combined-ca-bundle\") pod \"neutron-db-sync-jrgf9\" (UID: \"5bb7cfa5-1e81-4d93-a15a-77260adec0d3\") " pod="openstack/neutron-db-sync-jrgf9" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.559619 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e442b552-3d81-4d12-8d2c-8ef6c8b67ffd-config\") pod \"dnsmasq-dns-68dcc9cf6f-v9lcm\" (UID: \"e442b552-3d81-4d12-8d2c-8ef6c8b67ffd\") " pod="openstack/dnsmasq-dns-68dcc9cf6f-v9lcm" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.662805 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3373fd6b-9a0f-4268-8476-382740118f35-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3373fd6b-9a0f-4268-8476-382740118f35\") " pod="openstack/ceilometer-0" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.663080 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3373fd6b-9a0f-4268-8476-382740118f35-scripts\") pod \"ceilometer-0\" (UID: \"3373fd6b-9a0f-4268-8476-382740118f35\") " pod="openstack/ceilometer-0" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.663224 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r4lst\" (UniqueName: \"kubernetes.io/projected/e442b552-3d81-4d12-8d2c-8ef6c8b67ffd-kube-api-access-r4lst\") pod \"dnsmasq-dns-68dcc9cf6f-v9lcm\" (UID: \"e442b552-3d81-4d12-8d2c-8ef6c8b67ffd\") " pod="openstack/dnsmasq-dns-68dcc9cf6f-v9lcm" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.663318 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/5bb7cfa5-1e81-4d93-a15a-77260adec0d3-config\") pod \"neutron-db-sync-jrgf9\" (UID: \"5bb7cfa5-1e81-4d93-a15a-77260adec0d3\") " pod="openstack/neutron-db-sync-jrgf9" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.663386 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rfwkc\" (UniqueName: \"kubernetes.io/projected/5bb7cfa5-1e81-4d93-a15a-77260adec0d3-kube-api-access-rfwkc\") pod \"neutron-db-sync-jrgf9\" (UID: \"5bb7cfa5-1e81-4d93-a15a-77260adec0d3\") " pod="openstack/neutron-db-sync-jrgf9" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.663458 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e442b552-3d81-4d12-8d2c-8ef6c8b67ffd-dns-svc\") pod \"dnsmasq-dns-68dcc9cf6f-v9lcm\" (UID: \"e442b552-3d81-4d12-8d2c-8ef6c8b67ffd\") " pod="openstack/dnsmasq-dns-68dcc9cf6f-v9lcm" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.663666 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/e442b552-3d81-4d12-8d2c-8ef6c8b67ffd-ovsdbserver-sb\") pod \"dnsmasq-dns-68dcc9cf6f-v9lcm\" (UID: \"e442b552-3d81-4d12-8d2c-8ef6c8b67ffd\") " pod="openstack/dnsmasq-dns-68dcc9cf6f-v9lcm" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.663767 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e442b552-3d81-4d12-8d2c-8ef6c8b67ffd-ovsdbserver-nb\") pod \"dnsmasq-dns-68dcc9cf6f-v9lcm\" (UID: \"e442b552-3d81-4d12-8d2c-8ef6c8b67ffd\") " pod="openstack/dnsmasq-dns-68dcc9cf6f-v9lcm" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.663860 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3373fd6b-9a0f-4268-8476-382740118f35-run-httpd\") pod \"ceilometer-0\" (UID: \"3373fd6b-9a0f-4268-8476-382740118f35\") " pod="openstack/ceilometer-0" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.663904 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3373fd6b-9a0f-4268-8476-382740118f35-log-httpd\") pod \"ceilometer-0\" (UID: \"3373fd6b-9a0f-4268-8476-382740118f35\") " pod="openstack/ceilometer-0" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.664086 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5bb7cfa5-1e81-4d93-a15a-77260adec0d3-combined-ca-bundle\") pod \"neutron-db-sync-jrgf9\" (UID: \"5bb7cfa5-1e81-4d93-a15a-77260adec0d3\") " pod="openstack/neutron-db-sync-jrgf9" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.664150 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-44v62\" (UniqueName: \"kubernetes.io/projected/3373fd6b-9a0f-4268-8476-382740118f35-kube-api-access-44v62\") pod \"ceilometer-0\" (UID: \"3373fd6b-9a0f-4268-8476-382740118f35\") " pod="openstack/ceilometer-0" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.664406 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3373fd6b-9a0f-4268-8476-382740118f35-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3373fd6b-9a0f-4268-8476-382740118f35\") " pod="openstack/ceilometer-0" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.664460 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e442b552-3d81-4d12-8d2c-8ef6c8b67ffd-config\") pod \"dnsmasq-dns-68dcc9cf6f-v9lcm\" (UID: \"e442b552-3d81-4d12-8d2c-8ef6c8b67ffd\") " pod="openstack/dnsmasq-dns-68dcc9cf6f-v9lcm" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.664669 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3373fd6b-9a0f-4268-8476-382740118f35-config-data\") pod \"ceilometer-0\" (UID: \"3373fd6b-9a0f-4268-8476-382740118f35\") " pod="openstack/ceilometer-0" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.665665 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e442b552-3d81-4d12-8d2c-8ef6c8b67ffd-dns-svc\") pod \"dnsmasq-dns-68dcc9cf6f-v9lcm\" (UID: \"e442b552-3d81-4d12-8d2c-8ef6c8b67ffd\") " 
pod="openstack/dnsmasq-dns-68dcc9cf6f-v9lcm" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.666538 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e442b552-3d81-4d12-8d2c-8ef6c8b67ffd-ovsdbserver-nb\") pod \"dnsmasq-dns-68dcc9cf6f-v9lcm\" (UID: \"e442b552-3d81-4d12-8d2c-8ef6c8b67ffd\") " pod="openstack/dnsmasq-dns-68dcc9cf6f-v9lcm" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.666642 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e442b552-3d81-4d12-8d2c-8ef6c8b67ffd-ovsdbserver-sb\") pod \"dnsmasq-dns-68dcc9cf6f-v9lcm\" (UID: \"e442b552-3d81-4d12-8d2c-8ef6c8b67ffd\") " pod="openstack/dnsmasq-dns-68dcc9cf6f-v9lcm" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.667175 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e442b552-3d81-4d12-8d2c-8ef6c8b67ffd-config\") pod \"dnsmasq-dns-68dcc9cf6f-v9lcm\" (UID: \"e442b552-3d81-4d12-8d2c-8ef6c8b67ffd\") " pod="openstack/dnsmasq-dns-68dcc9cf6f-v9lcm" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.671618 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5bb7cfa5-1e81-4d93-a15a-77260adec0d3-combined-ca-bundle\") pod \"neutron-db-sync-jrgf9\" (UID: \"5bb7cfa5-1e81-4d93-a15a-77260adec0d3\") " pod="openstack/neutron-db-sync-jrgf9" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.673659 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/5bb7cfa5-1e81-4d93-a15a-77260adec0d3-config\") pod \"neutron-db-sync-jrgf9\" (UID: \"5bb7cfa5-1e81-4d93-a15a-77260adec0d3\") " pod="openstack/neutron-db-sync-jrgf9" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.683663 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r4lst\" (UniqueName: \"kubernetes.io/projected/e442b552-3d81-4d12-8d2c-8ef6c8b67ffd-kube-api-access-r4lst\") pod \"dnsmasq-dns-68dcc9cf6f-v9lcm\" (UID: \"e442b552-3d81-4d12-8d2c-8ef6c8b67ffd\") " pod="openstack/dnsmasq-dns-68dcc9cf6f-v9lcm" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.686529 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rfwkc\" (UniqueName: \"kubernetes.io/projected/5bb7cfa5-1e81-4d93-a15a-77260adec0d3-kube-api-access-rfwkc\") pod \"neutron-db-sync-jrgf9\" (UID: \"5bb7cfa5-1e81-4d93-a15a-77260adec0d3\") " pod="openstack/neutron-db-sync-jrgf9" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.723957 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-9zbdp" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.767519 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3373fd6b-9a0f-4268-8476-382740118f35-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3373fd6b-9a0f-4268-8476-382740118f35\") " pod="openstack/ceilometer-0" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.767604 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3373fd6b-9a0f-4268-8476-382740118f35-scripts\") pod \"ceilometer-0\" (UID: \"3373fd6b-9a0f-4268-8476-382740118f35\") " pod="openstack/ceilometer-0" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.767761 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3373fd6b-9a0f-4268-8476-382740118f35-run-httpd\") pod \"ceilometer-0\" (UID: \"3373fd6b-9a0f-4268-8476-382740118f35\") " pod="openstack/ceilometer-0" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.767786 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3373fd6b-9a0f-4268-8476-382740118f35-log-httpd\") pod \"ceilometer-0\" (UID: \"3373fd6b-9a0f-4268-8476-382740118f35\") " pod="openstack/ceilometer-0" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.767837 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-44v62\" (UniqueName: \"kubernetes.io/projected/3373fd6b-9a0f-4268-8476-382740118f35-kube-api-access-44v62\") pod \"ceilometer-0\" (UID: \"3373fd6b-9a0f-4268-8476-382740118f35\") " pod="openstack/ceilometer-0" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.767892 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3373fd6b-9a0f-4268-8476-382740118f35-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3373fd6b-9a0f-4268-8476-382740118f35\") " pod="openstack/ceilometer-0" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.767945 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3373fd6b-9a0f-4268-8476-382740118f35-config-data\") pod \"ceilometer-0\" (UID: \"3373fd6b-9a0f-4268-8476-382740118f35\") " pod="openstack/ceilometer-0" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.769477 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3373fd6b-9a0f-4268-8476-382740118f35-log-httpd\") pod \"ceilometer-0\" (UID: \"3373fd6b-9a0f-4268-8476-382740118f35\") " pod="openstack/ceilometer-0" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.770420 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3373fd6b-9a0f-4268-8476-382740118f35-run-httpd\") pod \"ceilometer-0\" (UID: \"3373fd6b-9a0f-4268-8476-382740118f35\") " pod="openstack/ceilometer-0" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.773725 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3373fd6b-9a0f-4268-8476-382740118f35-scripts\") pod \"ceilometer-0\" (UID: \"3373fd6b-9a0f-4268-8476-382740118f35\") " pod="openstack/ceilometer-0" Nov 
25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.774406 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3373fd6b-9a0f-4268-8476-382740118f35-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3373fd6b-9a0f-4268-8476-382740118f35\") " pod="openstack/ceilometer-0" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.774934 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3373fd6b-9a0f-4268-8476-382740118f35-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3373fd6b-9a0f-4268-8476-382740118f35\") " pod="openstack/ceilometer-0" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.784348 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3373fd6b-9a0f-4268-8476-382740118f35-config-data\") pod \"ceilometer-0\" (UID: \"3373fd6b-9a0f-4268-8476-382740118f35\") " pod="openstack/ceilometer-0" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.791016 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-44v62\" (UniqueName: \"kubernetes.io/projected/3373fd6b-9a0f-4268-8476-382740118f35-kube-api-access-44v62\") pod \"ceilometer-0\" (UID: \"3373fd6b-9a0f-4268-8476-382740118f35\") " pod="openstack/ceilometer-0" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.903117 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68dcc9cf6f-v9lcm" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.923395 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-jrgf9" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.931593 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:05:56 crc kubenswrapper[4769]: I1125 10:05:56.961660 4769 scope.go:117] "RemoveContainer" containerID="8e001632f7acf9d0ae2e2b9606945759d0ddd5febb51566939358a5d94eba13a" Nov 25 10:05:57 crc kubenswrapper[4769]: I1125 10:05:57.190703 4769 scope.go:117] "RemoveContainer" containerID="be4b19b044f5a9e7a9f93745ce27d7018083d4e1a0a9385de3b6fb62e1968c17" Nov 25 10:05:57 crc kubenswrapper[4769]: I1125 10:05:57.565303 4769 scope.go:117] "RemoveContainer" containerID="577c38364846cc0e6bff04d1a0928fad6b76b4becbf8bafa72df7fffac346f1f" Nov 25 10:05:57 crc kubenswrapper[4769]: I1125 10:05:57.945775 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-vbhsj"] Nov 25 10:05:57 crc kubenswrapper[4769]: W1125 10:05:57.953177 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfa5947eb_7a68_40d9_8ea3_1dd6719dee8d.slice/crio-082e66335ce4e3a6ae2b3f843befe009ea2a04261145c6e0ac8a583c2da38120 WatchSource:0}: Error finding container 082e66335ce4e3a6ae2b3f843befe009ea2a04261145c6e0ac8a583c2da38120: Status 404 returned error can't find the container with id 082e66335ce4e3a6ae2b3f843befe009ea2a04261145c6e0ac8a583c2da38120 Nov 25 10:05:58 crc kubenswrapper[4769]: I1125 10:05:58.408577 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 25 10:05:58 crc kubenswrapper[4769]: I1125 10:05:58.423272 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-5tt9b"] Nov 25 10:05:58 crc kubenswrapper[4769]: I1125 10:05:58.438010 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-pvsgh"] Nov 25 10:05:58 crc kubenswrapper[4769]: I1125 10:05:58.455868 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-vbhsj" event={"ID":"fa5947eb-7a68-40d9-8ea3-1dd6719dee8d","Type":"ContainerStarted","Data":"082e66335ce4e3a6ae2b3f843befe009ea2a04261145c6e0ac8a583c2da38120"} Nov 25 10:05:58 crc kubenswrapper[4769]: I1125 10:05:58.460719 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-tnz6t-config-tk4p5" event={"ID":"4e7c7e4a-8aad-4693-81de-0029a90c48ac","Type":"ContainerStarted","Data":"65f779b115981bb11398df135b35a40ffc144a6aa60ffe09482b5df5ada622eb"} Nov 25 10:05:58 crc kubenswrapper[4769]: I1125 10:05:58.471044 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"6cbad0ab-1254-496a-8d0c-88fc840cf17e","Type":"ContainerStarted","Data":"e43a8548ed7a0c94c09fcda2049fa0001f813a7af236bfed1823fcdf19e6f902"} Nov 25 10:05:58 crc kubenswrapper[4769]: I1125 10:05:58.480119 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f877ddd87-ttfzd"] Nov 25 10:05:58 crc kubenswrapper[4769]: I1125 10:05:58.481015 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"f7987cb4-5485-438f-bc01-c69e509b81a6","Type":"ContainerStarted","Data":"835b3d7076deb0bb295c47f20363263aeef68f9f2d6c44ffbdf970504487bbaa"} Nov 25 10:05:58 crc kubenswrapper[4769]: I1125 10:05:58.496620 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-tnz6t-config-tk4p5" podStartSLOduration=12.496591215 podStartE2EDuration="12.496591215s" podCreationTimestamp="2025-11-25 10:05:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:05:58.492505246 +0000 UTC m=+1307.077477559" watchObservedRunningTime="2025-11-25 10:05:58.496591215 +0000 UTC m=+1307.081563528" Nov 25 10:05:58 crc kubenswrapper[4769]: I1125 10:05:58.543023 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mysqld-exporter-0" podStartSLOduration=5.943641789 podStartE2EDuration="17.542991721s" podCreationTimestamp="2025-11-25 10:05:41 +0000 UTC" firstStartedPulling="2025-11-25 10:05:45.458036237 +0000 UTC m=+1294.043008560" lastFinishedPulling="2025-11-25 10:05:57.057386179 +0000 UTC m=+1305.642358492" observedRunningTime="2025-11-25 10:05:58.512503099 +0000 UTC m=+1307.097475412" watchObservedRunningTime="2025-11-25 10:05:58.542991721 +0000 UTC m=+1307.127964034" Nov 25 10:05:58 crc kubenswrapper[4769]: W1125 10:05:58.581356 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda106d7cb_7fd1_404c_8bc1_6d5ac15fcdf1.slice/crio-26f642476593e0f37458804d797d75406bd39da86ae88fcd7b55ce14ab86b117 WatchSource:0}: Error finding container 26f642476593e0f37458804d797d75406bd39da86ae88fcd7b55ce14ab86b117: Status 404 returned error can't find the container with id 26f642476593e0f37458804d797d75406bd39da86ae88fcd7b55ce14ab86b117 Nov 25 10:05:58 crc kubenswrapper[4769]: I1125 10:05:58.832901 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-lfvzs"] Nov 25 10:05:58 crc kubenswrapper[4769]: I1125 10:05:58.874420 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-9zbdp"] Nov 25 10:05:58 crc kubenswrapper[4769]: W1125 10:05:58.876414 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poded79e867_002b_4591_b757_0410b73a43ef.slice/crio-1b299edccb75078078891e3e6f3e826b2fc970dff0fe3338cd40cf06669505bb WatchSource:0}: Error finding container 1b299edccb75078078891e3e6f3e826b2fc970dff0fe3338cd40cf06669505bb: Status 404 returned error can't find the container with id 1b299edccb75078078891e3e6f3e826b2fc970dff0fe3338cd40cf06669505bb Nov 25 10:05:58 crc kubenswrapper[4769]: I1125 10:05:58.927729 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-jrgf9"] Nov 25 10:05:58 crc kubenswrapper[4769]: I1125 10:05:58.960599 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-68dcc9cf6f-v9lcm"] Nov 25 10:05:58 crc kubenswrapper[4769]: I1125 10:05:58.973088 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:05:59 crc kubenswrapper[4769]: I1125 10:05:59.195217 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:05:59 crc kubenswrapper[4769]: I1125 10:05:59.505469 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68dcc9cf6f-v9lcm" event={"ID":"e442b552-3d81-4d12-8d2c-8ef6c8b67ffd","Type":"ContainerStarted","Data":"1eab4b9f74ff21467860ddd29f0b11792f8c51d96c5fbf0dffb4ba15a746e05a"} Nov 25 10:05:59 crc kubenswrapper[4769]: I1125 10:05:59.510993 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-vbhsj" event={"ID":"fa5947eb-7a68-40d9-8ea3-1dd6719dee8d","Type":"ContainerStarted","Data":"c382688aa5e0a4dfc850c1e90963dd8063c3ffd50a52c7e99ad9a7bdc9081fc8"} Nov 25 10:05:59 crc kubenswrapper[4769]: I1125 10:05:59.516907 4769 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"8ef4c37f-e3dc-4994-92bf-b41e7c215ef1","Type":"ContainerStarted","Data":"e872c288c63b5ed5c4f205ee77d9b6eee721f059a290ff70e2d94306162d134a"} Nov 25 10:05:59 crc kubenswrapper[4769]: I1125 10:05:59.518517 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-5tt9b" event={"ID":"639faafa-26cf-4b2e-831b-bc95e327cb3b","Type":"ContainerStarted","Data":"15a3e01d7e3f65c689cf62c1ca58e68203d091835e45762906e055c673cbfd4d"} Nov 25 10:05:59 crc kubenswrapper[4769]: I1125 10:05:59.519471 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f877ddd87-ttfzd" event={"ID":"a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1","Type":"ContainerStarted","Data":"26f642476593e0f37458804d797d75406bd39da86ae88fcd7b55ce14ab86b117"} Nov 25 10:05:59 crc kubenswrapper[4769]: I1125 10:05:59.520602 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3373fd6b-9a0f-4268-8476-382740118f35","Type":"ContainerStarted","Data":"5124b006ca83e899eebb48422737db90f4318001cd79ebddcbfccc09c161d2fb"} Nov 25 10:05:59 crc kubenswrapper[4769]: I1125 10:05:59.522454 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-lfvzs" event={"ID":"ed79e867-002b-4591-b757-0410b73a43ef","Type":"ContainerStarted","Data":"1b299edccb75078078891e3e6f3e826b2fc970dff0fe3338cd40cf06669505bb"} Nov 25 10:05:59 crc kubenswrapper[4769]: I1125 10:05:59.527048 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-jrgf9" event={"ID":"5bb7cfa5-1e81-4d93-a15a-77260adec0d3","Type":"ContainerStarted","Data":"6323a9b4b8af0f4d7206cd9139566b5555a2aeb0771490329ed8c81d536fdf66"} Nov 25 10:05:59 crc kubenswrapper[4769]: I1125 10:05:59.533381 4769 generic.go:334] "Generic (PLEG): container finished" podID="4e7c7e4a-8aad-4693-81de-0029a90c48ac" containerID="65f779b115981bb11398df135b35a40ffc144a6aa60ffe09482b5df5ada622eb" exitCode=0 Nov 25 10:05:59 crc kubenswrapper[4769]: I1125 10:05:59.533457 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-tnz6t-config-tk4p5" event={"ID":"4e7c7e4a-8aad-4693-81de-0029a90c48ac","Type":"ContainerDied","Data":"65f779b115981bb11398df135b35a40ffc144a6aa60ffe09482b5df5ada622eb"} Nov 25 10:05:59 crc kubenswrapper[4769]: I1125 10:05:59.540617 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-vbhsj" podStartSLOduration=4.540599391 podStartE2EDuration="4.540599391s" podCreationTimestamp="2025-11-25 10:05:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:05:59.530595275 +0000 UTC m=+1308.115567608" watchObservedRunningTime="2025-11-25 10:05:59.540599391 +0000 UTC m=+1308.125571694" Nov 25 10:05:59 crc kubenswrapper[4769]: I1125 10:05:59.549033 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-pvsgh" event={"ID":"175271fe-6677-49b5-b497-c45ef1816fb7","Type":"ContainerStarted","Data":"34dbacde582f659e8d43b96db337d5e219af871257cf42e6deb6b54de031e4af"} Nov 25 10:05:59 crc kubenswrapper[4769]: I1125 10:05:59.572395 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"f7987cb4-5485-438f-bc01-c69e509b81a6","Type":"ContainerStarted","Data":"38fa4771440900be7b93e4cc5a5e979cfdbebf216466faf7083e91e1c1a20190"} Nov 25 10:05:59 crc 
kubenswrapper[4769]: I1125 10:05:59.572443 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"f7987cb4-5485-438f-bc01-c69e509b81a6","Type":"ContainerStarted","Data":"47a063d72c6505dd2771d0e342c12d4f2086a11753740ba3b95c9e114a017be9"} Nov 25 10:05:59 crc kubenswrapper[4769]: I1125 10:05:59.575369 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-9zbdp" event={"ID":"29edc9fc-ffe1-4511-9f89-9d0795f063cb","Type":"ContainerStarted","Data":"2018496b59b712e564fb882795b5a8cb7aef6ee9dae4dc65365e6bde2e1c3559"} Nov 25 10:06:00 crc kubenswrapper[4769]: I1125 10:06:00.592410 4769 generic.go:334] "Generic (PLEG): container finished" podID="e442b552-3d81-4d12-8d2c-8ef6c8b67ffd" containerID="8247f88a653dcdf8c849249384a7e5a03bdd93533e5563d5f1052ebb9f522085" exitCode=0 Nov 25 10:06:00 crc kubenswrapper[4769]: I1125 10:06:00.592982 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68dcc9cf6f-v9lcm" event={"ID":"e442b552-3d81-4d12-8d2c-8ef6c8b67ffd","Type":"ContainerDied","Data":"8247f88a653dcdf8c849249384a7e5a03bdd93533e5563d5f1052ebb9f522085"} Nov 25 10:06:00 crc kubenswrapper[4769]: I1125 10:06:00.600385 4769 generic.go:334] "Generic (PLEG): container finished" podID="a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1" containerID="da319549c4153656405fe3a39de18f13fce4d446e14329903249c33ae8d920e3" exitCode=0 Nov 25 10:06:00 crc kubenswrapper[4769]: I1125 10:06:00.600563 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f877ddd87-ttfzd" event={"ID":"a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1","Type":"ContainerDied","Data":"da319549c4153656405fe3a39de18f13fce4d446e14329903249c33ae8d920e3"} Nov 25 10:06:00 crc kubenswrapper[4769]: I1125 10:06:00.608566 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-jrgf9" event={"ID":"5bb7cfa5-1e81-4d93-a15a-77260adec0d3","Type":"ContainerStarted","Data":"b1f251e19bc2fedf163c481271d36edac389467986138a068d966dfff510ee10"} Nov 25 10:06:00 crc kubenswrapper[4769]: I1125 10:06:00.635118 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"f7987cb4-5485-438f-bc01-c69e509b81a6","Type":"ContainerStarted","Data":"94b84de353fb4826e8a56f5b70acc231851322d70477210896c0f29f965ccfe0"} Nov 25 10:06:00 crc kubenswrapper[4769]: I1125 10:06:00.716116 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-jrgf9" podStartSLOduration=5.716085941 podStartE2EDuration="5.716085941s" podCreationTimestamp="2025-11-25 10:05:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:06:00.70589316 +0000 UTC m=+1309.290865473" watchObservedRunningTime="2025-11-25 10:06:00.716085941 +0000 UTC m=+1309.301058254" Nov 25 10:06:01 crc kubenswrapper[4769]: I1125 10:06:01.423915 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-f877ddd87-ttfzd" Nov 25 10:06:01 crc kubenswrapper[4769]: I1125 10:06:01.490795 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1-ovsdbserver-nb\") pod \"a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1\" (UID: \"a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1\") " Nov 25 10:06:01 crc kubenswrapper[4769]: I1125 10:06:01.490892 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h8zxj\" (UniqueName: \"kubernetes.io/projected/a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1-kube-api-access-h8zxj\") pod \"a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1\" (UID: \"a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1\") " Nov 25 10:06:01 crc kubenswrapper[4769]: I1125 10:06:01.490996 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1-config\") pod \"a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1\" (UID: \"a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1\") " Nov 25 10:06:01 crc kubenswrapper[4769]: I1125 10:06:01.491017 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1-dns-svc\") pod \"a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1\" (UID: \"a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1\") " Nov 25 10:06:01 crc kubenswrapper[4769]: I1125 10:06:01.491568 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1-ovsdbserver-sb\") pod \"a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1\" (UID: \"a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1\") " Nov 25 10:06:01 crc kubenswrapper[4769]: I1125 10:06:01.507030 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1-kube-api-access-h8zxj" (OuterVolumeSpecName: "kube-api-access-h8zxj") pod "a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1" (UID: "a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1"). InnerVolumeSpecName "kube-api-access-h8zxj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:06:01 crc kubenswrapper[4769]: I1125 10:06:01.532458 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1" (UID: "a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:06:01 crc kubenswrapper[4769]: I1125 10:06:01.546914 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1-config" (OuterVolumeSpecName: "config") pod "a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1" (UID: "a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:06:01 crc kubenswrapper[4769]: I1125 10:06:01.554827 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1" (UID: "a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:06:01 crc kubenswrapper[4769]: I1125 10:06:01.555038 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1" (UID: "a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:06:01 crc kubenswrapper[4769]: I1125 10:06:01.601681 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:01 crc kubenswrapper[4769]: I1125 10:06:01.601723 4769 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:01 crc kubenswrapper[4769]: I1125 10:06:01.601740 4769 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:01 crc kubenswrapper[4769]: I1125 10:06:01.601755 4769 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:01 crc kubenswrapper[4769]: I1125 10:06:01.601773 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h8zxj\" (UniqueName: \"kubernetes.io/projected/a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1-kube-api-access-h8zxj\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:01 crc kubenswrapper[4769]: I1125 10:06:01.739088 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f877ddd87-ttfzd" event={"ID":"a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1","Type":"ContainerDied","Data":"26f642476593e0f37458804d797d75406bd39da86ae88fcd7b55ce14ab86b117"} Nov 25 10:06:01 crc kubenswrapper[4769]: I1125 10:06:01.739174 4769 scope.go:117] "RemoveContainer" containerID="da319549c4153656405fe3a39de18f13fce4d446e14329903249c33ae8d920e3" Nov 25 10:06:01 crc kubenswrapper[4769]: I1125 10:06:01.739197 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f877ddd87-ttfzd" Nov 25 10:06:01 crc kubenswrapper[4769]: I1125 10:06:01.833648 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f877ddd87-ttfzd"] Nov 25 10:06:01 crc kubenswrapper[4769]: I1125 10:06:01.858656 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-f877ddd87-ttfzd"] Nov 25 10:06:02 crc kubenswrapper[4769]: I1125 10:06:02.220177 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-tnz6t-config-tk4p5" Nov 25 10:06:02 crc kubenswrapper[4769]: I1125 10:06:02.303922 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1" path="/var/lib/kubelet/pods/a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1/volumes" Nov 25 10:06:02 crc kubenswrapper[4769]: I1125 10:06:02.326169 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gkgmx\" (UniqueName: \"kubernetes.io/projected/4e7c7e4a-8aad-4693-81de-0029a90c48ac-kube-api-access-gkgmx\") pod \"4e7c7e4a-8aad-4693-81de-0029a90c48ac\" (UID: \"4e7c7e4a-8aad-4693-81de-0029a90c48ac\") " Nov 25 10:06:02 crc kubenswrapper[4769]: I1125 10:06:02.326228 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/4e7c7e4a-8aad-4693-81de-0029a90c48ac-additional-scripts\") pod \"4e7c7e4a-8aad-4693-81de-0029a90c48ac\" (UID: \"4e7c7e4a-8aad-4693-81de-0029a90c48ac\") " Nov 25 10:06:02 crc kubenswrapper[4769]: I1125 10:06:02.326278 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/4e7c7e4a-8aad-4693-81de-0029a90c48ac-var-run-ovn\") pod \"4e7c7e4a-8aad-4693-81de-0029a90c48ac\" (UID: \"4e7c7e4a-8aad-4693-81de-0029a90c48ac\") " Nov 25 10:06:02 crc kubenswrapper[4769]: I1125 10:06:02.326419 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/4e7c7e4a-8aad-4693-81de-0029a90c48ac-var-run\") pod \"4e7c7e4a-8aad-4693-81de-0029a90c48ac\" (UID: \"4e7c7e4a-8aad-4693-81de-0029a90c48ac\") " Nov 25 10:06:02 crc kubenswrapper[4769]: I1125 10:06:02.326519 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4e7c7e4a-8aad-4693-81de-0029a90c48ac-scripts\") pod \"4e7c7e4a-8aad-4693-81de-0029a90c48ac\" (UID: \"4e7c7e4a-8aad-4693-81de-0029a90c48ac\") " Nov 25 10:06:02 crc kubenswrapper[4769]: I1125 10:06:02.327893 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/4e7c7e4a-8aad-4693-81de-0029a90c48ac-var-log-ovn\") pod \"4e7c7e4a-8aad-4693-81de-0029a90c48ac\" (UID: \"4e7c7e4a-8aad-4693-81de-0029a90c48ac\") " Nov 25 10:06:02 crc kubenswrapper[4769]: I1125 10:06:02.344083 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4e7c7e4a-8aad-4693-81de-0029a90c48ac-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "4e7c7e4a-8aad-4693-81de-0029a90c48ac" (UID: "4e7c7e4a-8aad-4693-81de-0029a90c48ac"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:06:02 crc kubenswrapper[4769]: I1125 10:06:02.344133 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4e7c7e4a-8aad-4693-81de-0029a90c48ac-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "4e7c7e4a-8aad-4693-81de-0029a90c48ac" (UID: "4e7c7e4a-8aad-4693-81de-0029a90c48ac"). InnerVolumeSpecName "var-log-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 10:06:02 crc kubenswrapper[4769]: I1125 10:06:02.344432 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4e7c7e4a-8aad-4693-81de-0029a90c48ac-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "4e7c7e4a-8aad-4693-81de-0029a90c48ac" (UID: "4e7c7e4a-8aad-4693-81de-0029a90c48ac"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 10:06:02 crc kubenswrapper[4769]: I1125 10:06:02.345218 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4e7c7e4a-8aad-4693-81de-0029a90c48ac-scripts" (OuterVolumeSpecName: "scripts") pod "4e7c7e4a-8aad-4693-81de-0029a90c48ac" (UID: "4e7c7e4a-8aad-4693-81de-0029a90c48ac"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:06:02 crc kubenswrapper[4769]: I1125 10:06:02.345436 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4e7c7e4a-8aad-4693-81de-0029a90c48ac-var-run" (OuterVolumeSpecName: "var-run") pod "4e7c7e4a-8aad-4693-81de-0029a90c48ac" (UID: "4e7c7e4a-8aad-4693-81de-0029a90c48ac"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 10:06:02 crc kubenswrapper[4769]: I1125 10:06:02.347180 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4e7c7e4a-8aad-4693-81de-0029a90c48ac-kube-api-access-gkgmx" (OuterVolumeSpecName: "kube-api-access-gkgmx") pod "4e7c7e4a-8aad-4693-81de-0029a90c48ac" (UID: "4e7c7e4a-8aad-4693-81de-0029a90c48ac"). InnerVolumeSpecName "kube-api-access-gkgmx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:06:02 crc kubenswrapper[4769]: I1125 10:06:02.430978 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gkgmx\" (UniqueName: \"kubernetes.io/projected/4e7c7e4a-8aad-4693-81de-0029a90c48ac-kube-api-access-gkgmx\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:02 crc kubenswrapper[4769]: I1125 10:06:02.431033 4769 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/4e7c7e4a-8aad-4693-81de-0029a90c48ac-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:02 crc kubenswrapper[4769]: I1125 10:06:02.431044 4769 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/4e7c7e4a-8aad-4693-81de-0029a90c48ac-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:02 crc kubenswrapper[4769]: I1125 10:06:02.431058 4769 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/4e7c7e4a-8aad-4693-81de-0029a90c48ac-var-run\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:02 crc kubenswrapper[4769]: I1125 10:06:02.431067 4769 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4e7c7e4a-8aad-4693-81de-0029a90c48ac-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:02 crc kubenswrapper[4769]: I1125 10:06:02.431076 4769 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/4e7c7e4a-8aad-4693-81de-0029a90c48ac-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:02 crc kubenswrapper[4769]: I1125 10:06:02.785675 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-tnz6t-config-tk4p5" 
event={"ID":"4e7c7e4a-8aad-4693-81de-0029a90c48ac","Type":"ContainerDied","Data":"a4fc419d0487c71b8041878737683002d852c5944c6c20daafa72b6cd319724d"} Nov 25 10:06:02 crc kubenswrapper[4769]: I1125 10:06:02.786108 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a4fc419d0487c71b8041878737683002d852c5944c6c20daafa72b6cd319724d" Nov 25 10:06:02 crc kubenswrapper[4769]: I1125 10:06:02.786064 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-tnz6t-config-tk4p5" Nov 25 10:06:02 crc kubenswrapper[4769]: I1125 10:06:02.799299 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68dcc9cf6f-v9lcm" event={"ID":"e442b552-3d81-4d12-8d2c-8ef6c8b67ffd","Type":"ContainerStarted","Data":"ad05b9dd04cd38e185f707963ff959077116f0538529b73da8c957bacb064692"} Nov 25 10:06:02 crc kubenswrapper[4769]: I1125 10:06:02.799747 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-68dcc9cf6f-v9lcm" Nov 25 10:06:02 crc kubenswrapper[4769]: I1125 10:06:02.829599 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"8ef4c37f-e3dc-4994-92bf-b41e7c215ef1","Type":"ContainerStarted","Data":"e93787bff76970face9018888dc696101bfae773610d872b38673f25b61073bd"} Nov 25 10:06:02 crc kubenswrapper[4769]: I1125 10:06:02.833783 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-68dcc9cf6f-v9lcm" podStartSLOduration=7.833763694 podStartE2EDuration="7.833763694s" podCreationTimestamp="2025-11-25 10:05:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:06:02.828139664 +0000 UTC m=+1311.413111987" watchObservedRunningTime="2025-11-25 10:06:02.833763694 +0000 UTC m=+1311.418736007" Nov 25 10:06:03 crc kubenswrapper[4769]: I1125 10:06:03.335094 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-tnz6t-config-tk4p5"] Nov 25 10:06:03 crc kubenswrapper[4769]: I1125 10:06:03.347978 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-tnz6t-config-tk4p5"] Nov 25 10:06:04 crc kubenswrapper[4769]: I1125 10:06:04.258348 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4e7c7e4a-8aad-4693-81de-0029a90c48ac" path="/var/lib/kubelet/pods/4e7c7e4a-8aad-4693-81de-0029a90c48ac/volumes" Nov 25 10:06:05 crc kubenswrapper[4769]: I1125 10:06:05.874045 4769 generic.go:334] "Generic (PLEG): container finished" podID="fa5947eb-7a68-40d9-8ea3-1dd6719dee8d" containerID="c382688aa5e0a4dfc850c1e90963dd8063c3ffd50a52c7e99ad9a7bdc9081fc8" exitCode=0 Nov 25 10:06:05 crc kubenswrapper[4769]: I1125 10:06:05.874134 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-vbhsj" event={"ID":"fa5947eb-7a68-40d9-8ea3-1dd6719dee8d","Type":"ContainerDied","Data":"c382688aa5e0a4dfc850c1e90963dd8063c3ffd50a52c7e99ad9a7bdc9081fc8"} Nov 25 10:06:05 crc kubenswrapper[4769]: I1125 10:06:05.879016 4769 generic.go:334] "Generic (PLEG): container finished" podID="28b97e59-606d-4810-bec5-cecbc1c691bc" containerID="b176b2b613fd303e61f4c5784dbcb2a06c08b8533cfc13538dd118b0c84b0a99" exitCode=0 Nov 25 10:06:05 crc kubenswrapper[4769]: I1125 10:06:05.879066 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-jm2np" 
event={"ID":"28b97e59-606d-4810-bec5-cecbc1c691bc","Type":"ContainerDied","Data":"b176b2b613fd303e61f4c5784dbcb2a06c08b8533cfc13538dd118b0c84b0a99"} Nov 25 10:06:10 crc kubenswrapper[4769]: I1125 10:06:10.974164 4769 generic.go:334] "Generic (PLEG): container finished" podID="8ef4c37f-e3dc-4994-92bf-b41e7c215ef1" containerID="e93787bff76970face9018888dc696101bfae773610d872b38673f25b61073bd" exitCode=0 Nov 25 10:06:10 crc kubenswrapper[4769]: I1125 10:06:10.974424 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"8ef4c37f-e3dc-4994-92bf-b41e7c215ef1","Type":"ContainerDied","Data":"e93787bff76970face9018888dc696101bfae773610d872b38673f25b61073bd"} Nov 25 10:06:11 crc kubenswrapper[4769]: I1125 10:06:11.905133 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-68dcc9cf6f-v9lcm" Nov 25 10:06:11 crc kubenswrapper[4769]: I1125 10:06:11.970056 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-698758b865-2tlt4"] Nov 25 10:06:11 crc kubenswrapper[4769]: I1125 10:06:11.970369 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-698758b865-2tlt4" podUID="0dffefd7-a278-4acd-87f4-0b1585dfb869" containerName="dnsmasq-dns" containerID="cri-o://b3f62375ae317683f5b0dd0fa4c637d50ca7f4926d68f49b5bdb5aa536893a5a" gracePeriod=10 Nov 25 10:06:12 crc kubenswrapper[4769]: I1125 10:06:12.245795 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-2tlt4" podUID="0dffefd7-a278-4acd-87f4-0b1585dfb869" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.150:5353: connect: connection refused" Nov 25 10:06:13 crc kubenswrapper[4769]: I1125 10:06:13.009998 4769 generic.go:334] "Generic (PLEG): container finished" podID="0dffefd7-a278-4acd-87f4-0b1585dfb869" containerID="b3f62375ae317683f5b0dd0fa4c637d50ca7f4926d68f49b5bdb5aa536893a5a" exitCode=0 Nov 25 10:06:13 crc kubenswrapper[4769]: I1125 10:06:13.010091 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-2tlt4" event={"ID":"0dffefd7-a278-4acd-87f4-0b1585dfb869","Type":"ContainerDied","Data":"b3f62375ae317683f5b0dd0fa4c637d50ca7f4926d68f49b5bdb5aa536893a5a"} Nov 25 10:06:15 crc kubenswrapper[4769]: E1125 10:06:15.257467 4769 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified" Nov 25 10:06:15 crc kubenswrapper[4769]: E1125 10:06:15.258136 4769 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n645h584h67bh5bch5d4hfch94h78h5d7h84h5fdhf5h68hddhcdhb5h68bh585h5h66fh5ch6fh68bh5b7hdh645h54h587h569hfh5dbh88q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-44v62,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(3373fd6b-9a0f-4268-8476-382740118f35): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 10:06:15 crc kubenswrapper[4769]: I1125 10:06:15.390318 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-jm2np" Nov 25 10:06:15 crc kubenswrapper[4769]: I1125 10:06:15.400338 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-vbhsj" Nov 25 10:06:15 crc kubenswrapper[4769]: I1125 10:06:15.506134 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kmdnj\" (UniqueName: \"kubernetes.io/projected/28b97e59-606d-4810-bec5-cecbc1c691bc-kube-api-access-kmdnj\") pod \"28b97e59-606d-4810-bec5-cecbc1c691bc\" (UID: \"28b97e59-606d-4810-bec5-cecbc1c691bc\") " Nov 25 10:06:15 crc kubenswrapper[4769]: I1125 10:06:15.506211 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/fa5947eb-7a68-40d9-8ea3-1dd6719dee8d-fernet-keys\") pod \"fa5947eb-7a68-40d9-8ea3-1dd6719dee8d\" (UID: \"fa5947eb-7a68-40d9-8ea3-1dd6719dee8d\") " Nov 25 10:06:15 crc kubenswrapper[4769]: I1125 10:06:15.506344 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28b97e59-606d-4810-bec5-cecbc1c691bc-combined-ca-bundle\") pod \"28b97e59-606d-4810-bec5-cecbc1c691bc\" (UID: \"28b97e59-606d-4810-bec5-cecbc1c691bc\") " Nov 25 10:06:15 crc kubenswrapper[4769]: I1125 10:06:15.506414 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/28b97e59-606d-4810-bec5-cecbc1c691bc-db-sync-config-data\") pod \"28b97e59-606d-4810-bec5-cecbc1c691bc\" (UID: \"28b97e59-606d-4810-bec5-cecbc1c691bc\") " Nov 25 10:06:15 crc kubenswrapper[4769]: I1125 10:06:15.506526 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/fa5947eb-7a68-40d9-8ea3-1dd6719dee8d-credential-keys\") pod \"fa5947eb-7a68-40d9-8ea3-1dd6719dee8d\" (UID: \"fa5947eb-7a68-40d9-8ea3-1dd6719dee8d\") " Nov 25 10:06:15 crc kubenswrapper[4769]: I1125 10:06:15.506557 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/28b97e59-606d-4810-bec5-cecbc1c691bc-config-data\") pod \"28b97e59-606d-4810-bec5-cecbc1c691bc\" (UID: \"28b97e59-606d-4810-bec5-cecbc1c691bc\") " Nov 25 10:06:15 crc kubenswrapper[4769]: I1125 10:06:15.506591 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7sn8m\" (UniqueName: \"kubernetes.io/projected/fa5947eb-7a68-40d9-8ea3-1dd6719dee8d-kube-api-access-7sn8m\") pod \"fa5947eb-7a68-40d9-8ea3-1dd6719dee8d\" (UID: \"fa5947eb-7a68-40d9-8ea3-1dd6719dee8d\") " Nov 25 10:06:15 crc kubenswrapper[4769]: I1125 10:06:15.506624 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa5947eb-7a68-40d9-8ea3-1dd6719dee8d-config-data\") pod \"fa5947eb-7a68-40d9-8ea3-1dd6719dee8d\" (UID: \"fa5947eb-7a68-40d9-8ea3-1dd6719dee8d\") " Nov 25 10:06:15 crc kubenswrapper[4769]: I1125 10:06:15.506708 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa5947eb-7a68-40d9-8ea3-1dd6719dee8d-combined-ca-bundle\") pod \"fa5947eb-7a68-40d9-8ea3-1dd6719dee8d\" (UID: \"fa5947eb-7a68-40d9-8ea3-1dd6719dee8d\") " Nov 25 10:06:15 crc kubenswrapper[4769]: I1125 10:06:15.506739 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fa5947eb-7a68-40d9-8ea3-1dd6719dee8d-scripts\") pod 
\"fa5947eb-7a68-40d9-8ea3-1dd6719dee8d\" (UID: \"fa5947eb-7a68-40d9-8ea3-1dd6719dee8d\") " Nov 25 10:06:15 crc kubenswrapper[4769]: I1125 10:06:15.523189 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa5947eb-7a68-40d9-8ea3-1dd6719dee8d-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "fa5947eb-7a68-40d9-8ea3-1dd6719dee8d" (UID: "fa5947eb-7a68-40d9-8ea3-1dd6719dee8d"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:06:15 crc kubenswrapper[4769]: I1125 10:06:15.523549 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa5947eb-7a68-40d9-8ea3-1dd6719dee8d-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "fa5947eb-7a68-40d9-8ea3-1dd6719dee8d" (UID: "fa5947eb-7a68-40d9-8ea3-1dd6719dee8d"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:06:15 crc kubenswrapper[4769]: I1125 10:06:15.525216 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/28b97e59-606d-4810-bec5-cecbc1c691bc-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "28b97e59-606d-4810-bec5-cecbc1c691bc" (UID: "28b97e59-606d-4810-bec5-cecbc1c691bc"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:06:15 crc kubenswrapper[4769]: I1125 10:06:15.525579 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa5947eb-7a68-40d9-8ea3-1dd6719dee8d-scripts" (OuterVolumeSpecName: "scripts") pod "fa5947eb-7a68-40d9-8ea3-1dd6719dee8d" (UID: "fa5947eb-7a68-40d9-8ea3-1dd6719dee8d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:06:15 crc kubenswrapper[4769]: I1125 10:06:15.525938 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fa5947eb-7a68-40d9-8ea3-1dd6719dee8d-kube-api-access-7sn8m" (OuterVolumeSpecName: "kube-api-access-7sn8m") pod "fa5947eb-7a68-40d9-8ea3-1dd6719dee8d" (UID: "fa5947eb-7a68-40d9-8ea3-1dd6719dee8d"). InnerVolumeSpecName "kube-api-access-7sn8m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:06:15 crc kubenswrapper[4769]: I1125 10:06:15.536184 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/28b97e59-606d-4810-bec5-cecbc1c691bc-kube-api-access-kmdnj" (OuterVolumeSpecName: "kube-api-access-kmdnj") pod "28b97e59-606d-4810-bec5-cecbc1c691bc" (UID: "28b97e59-606d-4810-bec5-cecbc1c691bc"). InnerVolumeSpecName "kube-api-access-kmdnj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:06:15 crc kubenswrapper[4769]: I1125 10:06:15.544631 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa5947eb-7a68-40d9-8ea3-1dd6719dee8d-config-data" (OuterVolumeSpecName: "config-data") pod "fa5947eb-7a68-40d9-8ea3-1dd6719dee8d" (UID: "fa5947eb-7a68-40d9-8ea3-1dd6719dee8d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:06:15 crc kubenswrapper[4769]: I1125 10:06:15.551804 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa5947eb-7a68-40d9-8ea3-1dd6719dee8d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fa5947eb-7a68-40d9-8ea3-1dd6719dee8d" (UID: "fa5947eb-7a68-40d9-8ea3-1dd6719dee8d"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:06:15 crc kubenswrapper[4769]: I1125 10:06:15.577486 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/28b97e59-606d-4810-bec5-cecbc1c691bc-config-data" (OuterVolumeSpecName: "config-data") pod "28b97e59-606d-4810-bec5-cecbc1c691bc" (UID: "28b97e59-606d-4810-bec5-cecbc1c691bc"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:06:15 crc kubenswrapper[4769]: I1125 10:06:15.580912 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/28b97e59-606d-4810-bec5-cecbc1c691bc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "28b97e59-606d-4810-bec5-cecbc1c691bc" (UID: "28b97e59-606d-4810-bec5-cecbc1c691bc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:06:15 crc kubenswrapper[4769]: I1125 10:06:15.609496 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kmdnj\" (UniqueName: \"kubernetes.io/projected/28b97e59-606d-4810-bec5-cecbc1c691bc-kube-api-access-kmdnj\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:15 crc kubenswrapper[4769]: I1125 10:06:15.609529 4769 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/fa5947eb-7a68-40d9-8ea3-1dd6719dee8d-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:15 crc kubenswrapper[4769]: I1125 10:06:15.609541 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28b97e59-606d-4810-bec5-cecbc1c691bc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:15 crc kubenswrapper[4769]: I1125 10:06:15.609552 4769 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/28b97e59-606d-4810-bec5-cecbc1c691bc-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:15 crc kubenswrapper[4769]: I1125 10:06:15.609563 4769 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/fa5947eb-7a68-40d9-8ea3-1dd6719dee8d-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:15 crc kubenswrapper[4769]: I1125 10:06:15.609572 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/28b97e59-606d-4810-bec5-cecbc1c691bc-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:15 crc kubenswrapper[4769]: I1125 10:06:15.609582 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7sn8m\" (UniqueName: \"kubernetes.io/projected/fa5947eb-7a68-40d9-8ea3-1dd6719dee8d-kube-api-access-7sn8m\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:15 crc kubenswrapper[4769]: I1125 10:06:15.609591 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa5947eb-7a68-40d9-8ea3-1dd6719dee8d-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:15 crc kubenswrapper[4769]: I1125 10:06:15.609600 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa5947eb-7a68-40d9-8ea3-1dd6719dee8d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:15 crc kubenswrapper[4769]: I1125 10:06:15.609611 4769 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/fa5947eb-7a68-40d9-8ea3-1dd6719dee8d-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.041680 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-vbhsj" Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.041686 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-vbhsj" event={"ID":"fa5947eb-7a68-40d9-8ea3-1dd6719dee8d","Type":"ContainerDied","Data":"082e66335ce4e3a6ae2b3f843befe009ea2a04261145c6e0ac8a583c2da38120"} Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.042159 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="082e66335ce4e3a6ae2b3f843befe009ea2a04261145c6e0ac8a583c2da38120" Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.044630 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-jm2np" event={"ID":"28b97e59-606d-4810-bec5-cecbc1c691bc","Type":"ContainerDied","Data":"387eee9d306f0fd00088598a66c1c7e341e649f3b21a8e6e37dde9ffdeb861ca"} Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.044710 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="387eee9d306f0fd00088598a66c1c7e341e649f3b21a8e6e37dde9ffdeb861ca" Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.044652 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-jm2np" Nov 25 10:06:16 crc kubenswrapper[4769]: E1125 10:06:16.192347 4769 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfa5947eb_7a68_40d9_8ea3_1dd6719dee8d.slice/crio-082e66335ce4e3a6ae2b3f843befe009ea2a04261145c6e0ac8a583c2da38120\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod28b97e59_606d_4810_bec5_cecbc1c691bc.slice\": RecentStats: unable to find data in memory cache]" Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.602561 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-vbhsj"] Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.620928 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-vbhsj"] Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.762041 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-qkr9g"] Nov 25 10:06:16 crc kubenswrapper[4769]: E1125 10:06:16.763105 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1" containerName="init" Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.763124 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1" containerName="init" Nov 25 10:06:16 crc kubenswrapper[4769]: E1125 10:06:16.763143 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa5947eb-7a68-40d9-8ea3-1dd6719dee8d" containerName="keystone-bootstrap" Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.763152 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa5947eb-7a68-40d9-8ea3-1dd6719dee8d" containerName="keystone-bootstrap" Nov 25 10:06:16 crc kubenswrapper[4769]: E1125 10:06:16.763195 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28b97e59-606d-4810-bec5-cecbc1c691bc" containerName="glance-db-sync" 
Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.763204 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="28b97e59-606d-4810-bec5-cecbc1c691bc" containerName="glance-db-sync" Nov 25 10:06:16 crc kubenswrapper[4769]: E1125 10:06:16.763219 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e7c7e4a-8aad-4693-81de-0029a90c48ac" containerName="ovn-config" Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.763226 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e7c7e4a-8aad-4693-81de-0029a90c48ac" containerName="ovn-config" Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.763479 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="28b97e59-606d-4810-bec5-cecbc1c691bc" containerName="glance-db-sync" Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.763503 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="fa5947eb-7a68-40d9-8ea3-1dd6719dee8d" containerName="keystone-bootstrap" Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.763519 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="4e7c7e4a-8aad-4693-81de-0029a90c48ac" containerName="ovn-config" Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.763532 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="a106d7cb-7fd1-404c-8bc1-6d5ac15fcdf1" containerName="init" Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.764630 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-qkr9g" Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.769733 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.770185 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.770392 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-bnc4t" Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.770519 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.780679 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.804015 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-qkr9g"] Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.846633 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ee29c33a-80a8-4f26-81bd-1af50bbbaabb-scripts\") pod \"keystone-bootstrap-qkr9g\" (UID: \"ee29c33a-80a8-4f26-81bd-1af50bbbaabb\") " pod="openstack/keystone-bootstrap-qkr9g" Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.846691 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ee29c33a-80a8-4f26-81bd-1af50bbbaabb-fernet-keys\") pod \"keystone-bootstrap-qkr9g\" (UID: \"ee29c33a-80a8-4f26-81bd-1af50bbbaabb\") " pod="openstack/keystone-bootstrap-qkr9g" Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.846718 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dg2tm\" (UniqueName: 
\"kubernetes.io/projected/ee29c33a-80a8-4f26-81bd-1af50bbbaabb-kube-api-access-dg2tm\") pod \"keystone-bootstrap-qkr9g\" (UID: \"ee29c33a-80a8-4f26-81bd-1af50bbbaabb\") " pod="openstack/keystone-bootstrap-qkr9g" Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.846877 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ee29c33a-80a8-4f26-81bd-1af50bbbaabb-credential-keys\") pod \"keystone-bootstrap-qkr9g\" (UID: \"ee29c33a-80a8-4f26-81bd-1af50bbbaabb\") " pod="openstack/keystone-bootstrap-qkr9g" Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.847169 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee29c33a-80a8-4f26-81bd-1af50bbbaabb-combined-ca-bundle\") pod \"keystone-bootstrap-qkr9g\" (UID: \"ee29c33a-80a8-4f26-81bd-1af50bbbaabb\") " pod="openstack/keystone-bootstrap-qkr9g" Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.847406 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee29c33a-80a8-4f26-81bd-1af50bbbaabb-config-data\") pod \"keystone-bootstrap-qkr9g\" (UID: \"ee29c33a-80a8-4f26-81bd-1af50bbbaabb\") " pod="openstack/keystone-bootstrap-qkr9g" Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.855480 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-f84976bdf-f7s6r"] Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.857673 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f84976bdf-f7s6r" Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.874813 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f84976bdf-f7s6r"] Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.950834 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ee29c33a-80a8-4f26-81bd-1af50bbbaabb-credential-keys\") pod \"keystone-bootstrap-qkr9g\" (UID: \"ee29c33a-80a8-4f26-81bd-1af50bbbaabb\") " pod="openstack/keystone-bootstrap-qkr9g" Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.950914 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/48fc1d3f-c20f-4949-b0d7-5b52cff1efcf-config\") pod \"dnsmasq-dns-f84976bdf-f7s6r\" (UID: \"48fc1d3f-c20f-4949-b0d7-5b52cff1efcf\") " pod="openstack/dnsmasq-dns-f84976bdf-f7s6r" Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.953452 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee29c33a-80a8-4f26-81bd-1af50bbbaabb-combined-ca-bundle\") pod \"keystone-bootstrap-qkr9g\" (UID: \"ee29c33a-80a8-4f26-81bd-1af50bbbaabb\") " pod="openstack/keystone-bootstrap-qkr9g" Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.953535 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/48fc1d3f-c20f-4949-b0d7-5b52cff1efcf-dns-svc\") pod \"dnsmasq-dns-f84976bdf-f7s6r\" (UID: \"48fc1d3f-c20f-4949-b0d7-5b52cff1efcf\") " pod="openstack/dnsmasq-dns-f84976bdf-f7s6r" Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.953794 4769 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee29c33a-80a8-4f26-81bd-1af50bbbaabb-config-data\") pod \"keystone-bootstrap-qkr9g\" (UID: \"ee29c33a-80a8-4f26-81bd-1af50bbbaabb\") " pod="openstack/keystone-bootstrap-qkr9g" Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.954120 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q2q6z\" (UniqueName: \"kubernetes.io/projected/48fc1d3f-c20f-4949-b0d7-5b52cff1efcf-kube-api-access-q2q6z\") pod \"dnsmasq-dns-f84976bdf-f7s6r\" (UID: \"48fc1d3f-c20f-4949-b0d7-5b52cff1efcf\") " pod="openstack/dnsmasq-dns-f84976bdf-f7s6r" Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.954244 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ee29c33a-80a8-4f26-81bd-1af50bbbaabb-scripts\") pod \"keystone-bootstrap-qkr9g\" (UID: \"ee29c33a-80a8-4f26-81bd-1af50bbbaabb\") " pod="openstack/keystone-bootstrap-qkr9g" Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.954318 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ee29c33a-80a8-4f26-81bd-1af50bbbaabb-fernet-keys\") pod \"keystone-bootstrap-qkr9g\" (UID: \"ee29c33a-80a8-4f26-81bd-1af50bbbaabb\") " pod="openstack/keystone-bootstrap-qkr9g" Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.954364 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/48fc1d3f-c20f-4949-b0d7-5b52cff1efcf-ovsdbserver-nb\") pod \"dnsmasq-dns-f84976bdf-f7s6r\" (UID: \"48fc1d3f-c20f-4949-b0d7-5b52cff1efcf\") " pod="openstack/dnsmasq-dns-f84976bdf-f7s6r" Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.954393 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dg2tm\" (UniqueName: \"kubernetes.io/projected/ee29c33a-80a8-4f26-81bd-1af50bbbaabb-kube-api-access-dg2tm\") pod \"keystone-bootstrap-qkr9g\" (UID: \"ee29c33a-80a8-4f26-81bd-1af50bbbaabb\") " pod="openstack/keystone-bootstrap-qkr9g" Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.954446 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/48fc1d3f-c20f-4949-b0d7-5b52cff1efcf-ovsdbserver-sb\") pod \"dnsmasq-dns-f84976bdf-f7s6r\" (UID: \"48fc1d3f-c20f-4949-b0d7-5b52cff1efcf\") " pod="openstack/dnsmasq-dns-f84976bdf-f7s6r" Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.963588 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee29c33a-80a8-4f26-81bd-1af50bbbaabb-config-data\") pod \"keystone-bootstrap-qkr9g\" (UID: \"ee29c33a-80a8-4f26-81bd-1af50bbbaabb\") " pod="openstack/keystone-bootstrap-qkr9g" Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.963681 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee29c33a-80a8-4f26-81bd-1af50bbbaabb-combined-ca-bundle\") pod \"keystone-bootstrap-qkr9g\" (UID: \"ee29c33a-80a8-4f26-81bd-1af50bbbaabb\") " pod="openstack/keystone-bootstrap-qkr9g" Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.964535 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: 
\"kubernetes.io/secret/ee29c33a-80a8-4f26-81bd-1af50bbbaabb-credential-keys\") pod \"keystone-bootstrap-qkr9g\" (UID: \"ee29c33a-80a8-4f26-81bd-1af50bbbaabb\") " pod="openstack/keystone-bootstrap-qkr9g" Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.977241 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ee29c33a-80a8-4f26-81bd-1af50bbbaabb-scripts\") pod \"keystone-bootstrap-qkr9g\" (UID: \"ee29c33a-80a8-4f26-81bd-1af50bbbaabb\") " pod="openstack/keystone-bootstrap-qkr9g" Nov 25 10:06:16 crc kubenswrapper[4769]: I1125 10:06:16.991277 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ee29c33a-80a8-4f26-81bd-1af50bbbaabb-fernet-keys\") pod \"keystone-bootstrap-qkr9g\" (UID: \"ee29c33a-80a8-4f26-81bd-1af50bbbaabb\") " pod="openstack/keystone-bootstrap-qkr9g" Nov 25 10:06:17 crc kubenswrapper[4769]: I1125 10:06:17.002311 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dg2tm\" (UniqueName: \"kubernetes.io/projected/ee29c33a-80a8-4f26-81bd-1af50bbbaabb-kube-api-access-dg2tm\") pod \"keystone-bootstrap-qkr9g\" (UID: \"ee29c33a-80a8-4f26-81bd-1af50bbbaabb\") " pod="openstack/keystone-bootstrap-qkr9g" Nov 25 10:06:17 crc kubenswrapper[4769]: I1125 10:06:17.064674 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/48fc1d3f-c20f-4949-b0d7-5b52cff1efcf-dns-svc\") pod \"dnsmasq-dns-f84976bdf-f7s6r\" (UID: \"48fc1d3f-c20f-4949-b0d7-5b52cff1efcf\") " pod="openstack/dnsmasq-dns-f84976bdf-f7s6r" Nov 25 10:06:17 crc kubenswrapper[4769]: I1125 10:06:17.064820 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q2q6z\" (UniqueName: \"kubernetes.io/projected/48fc1d3f-c20f-4949-b0d7-5b52cff1efcf-kube-api-access-q2q6z\") pod \"dnsmasq-dns-f84976bdf-f7s6r\" (UID: \"48fc1d3f-c20f-4949-b0d7-5b52cff1efcf\") " pod="openstack/dnsmasq-dns-f84976bdf-f7s6r" Nov 25 10:06:17 crc kubenswrapper[4769]: I1125 10:06:17.064868 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/48fc1d3f-c20f-4949-b0d7-5b52cff1efcf-ovsdbserver-nb\") pod \"dnsmasq-dns-f84976bdf-f7s6r\" (UID: \"48fc1d3f-c20f-4949-b0d7-5b52cff1efcf\") " pod="openstack/dnsmasq-dns-f84976bdf-f7s6r" Nov 25 10:06:17 crc kubenswrapper[4769]: I1125 10:06:17.064894 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/48fc1d3f-c20f-4949-b0d7-5b52cff1efcf-ovsdbserver-sb\") pod \"dnsmasq-dns-f84976bdf-f7s6r\" (UID: \"48fc1d3f-c20f-4949-b0d7-5b52cff1efcf\") " pod="openstack/dnsmasq-dns-f84976bdf-f7s6r" Nov 25 10:06:17 crc kubenswrapper[4769]: I1125 10:06:17.064936 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/48fc1d3f-c20f-4949-b0d7-5b52cff1efcf-config\") pod \"dnsmasq-dns-f84976bdf-f7s6r\" (UID: \"48fc1d3f-c20f-4949-b0d7-5b52cff1efcf\") " pod="openstack/dnsmasq-dns-f84976bdf-f7s6r" Nov 25 10:06:17 crc kubenswrapper[4769]: I1125 10:06:17.065800 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/48fc1d3f-c20f-4949-b0d7-5b52cff1efcf-config\") pod \"dnsmasq-dns-f84976bdf-f7s6r\" (UID: \"48fc1d3f-c20f-4949-b0d7-5b52cff1efcf\") " 
pod="openstack/dnsmasq-dns-f84976bdf-f7s6r" Nov 25 10:06:17 crc kubenswrapper[4769]: I1125 10:06:17.068774 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/48fc1d3f-c20f-4949-b0d7-5b52cff1efcf-ovsdbserver-nb\") pod \"dnsmasq-dns-f84976bdf-f7s6r\" (UID: \"48fc1d3f-c20f-4949-b0d7-5b52cff1efcf\") " pod="openstack/dnsmasq-dns-f84976bdf-f7s6r" Nov 25 10:06:17 crc kubenswrapper[4769]: I1125 10:06:17.069370 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/48fc1d3f-c20f-4949-b0d7-5b52cff1efcf-ovsdbserver-sb\") pod \"dnsmasq-dns-f84976bdf-f7s6r\" (UID: \"48fc1d3f-c20f-4949-b0d7-5b52cff1efcf\") " pod="openstack/dnsmasq-dns-f84976bdf-f7s6r" Nov 25 10:06:17 crc kubenswrapper[4769]: I1125 10:06:17.069788 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/48fc1d3f-c20f-4949-b0d7-5b52cff1efcf-dns-svc\") pod \"dnsmasq-dns-f84976bdf-f7s6r\" (UID: \"48fc1d3f-c20f-4949-b0d7-5b52cff1efcf\") " pod="openstack/dnsmasq-dns-f84976bdf-f7s6r" Nov 25 10:06:17 crc kubenswrapper[4769]: I1125 10:06:17.096855 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-qkr9g" Nov 25 10:06:17 crc kubenswrapper[4769]: I1125 10:06:17.132242 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q2q6z\" (UniqueName: \"kubernetes.io/projected/48fc1d3f-c20f-4949-b0d7-5b52cff1efcf-kube-api-access-q2q6z\") pod \"dnsmasq-dns-f84976bdf-f7s6r\" (UID: \"48fc1d3f-c20f-4949-b0d7-5b52cff1efcf\") " pod="openstack/dnsmasq-dns-f84976bdf-f7s6r" Nov 25 10:06:17 crc kubenswrapper[4769]: I1125 10:06:17.188720 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f84976bdf-f7s6r" Nov 25 10:06:17 crc kubenswrapper[4769]: I1125 10:06:17.238511 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-2tlt4" podUID="0dffefd7-a278-4acd-87f4-0b1585dfb869" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.150:5353: connect: connection refused" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.022071 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.024585 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.040164 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.060867 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.061342 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.064539 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-45xsn" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.115229 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ef5c65bd-86b7-418d-bc80-8df45db1b5a8-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"ef5c65bd-86b7-418d-bc80-8df45db1b5a8\") " pod="openstack/glance-default-external-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.115337 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef5c65bd-86b7-418d-bc80-8df45db1b5a8-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"ef5c65bd-86b7-418d-bc80-8df45db1b5a8\") " pod="openstack/glance-default-external-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.115430 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zm29f\" (UniqueName: \"kubernetes.io/projected/ef5c65bd-86b7-418d-bc80-8df45db1b5a8-kube-api-access-zm29f\") pod \"glance-default-external-api-0\" (UID: \"ef5c65bd-86b7-418d-bc80-8df45db1b5a8\") " pod="openstack/glance-default-external-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.115496 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ef5c65bd-86b7-418d-bc80-8df45db1b5a8-logs\") pod \"glance-default-external-api-0\" (UID: \"ef5c65bd-86b7-418d-bc80-8df45db1b5a8\") " pod="openstack/glance-default-external-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.116094 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"ef5c65bd-86b7-418d-bc80-8df45db1b5a8\") " pod="openstack/glance-default-external-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.116168 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ef5c65bd-86b7-418d-bc80-8df45db1b5a8-scripts\") pod \"glance-default-external-api-0\" (UID: \"ef5c65bd-86b7-418d-bc80-8df45db1b5a8\") " pod="openstack/glance-default-external-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.116253 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ef5c65bd-86b7-418d-bc80-8df45db1b5a8-config-data\") pod \"glance-default-external-api-0\" (UID: \"ef5c65bd-86b7-418d-bc80-8df45db1b5a8\") " 
pod="openstack/glance-default-external-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.157129 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.166751 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.172880 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.177987 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.218468 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fc0affe7-9d6e-4a96-90de-f8206cc38627-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"fc0affe7-9d6e-4a96-90de-f8206cc38627\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.218530 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"ef5c65bd-86b7-418d-bc80-8df45db1b5a8\") " pod="openstack/glance-default-external-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.218554 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ef5c65bd-86b7-418d-bc80-8df45db1b5a8-scripts\") pod \"glance-default-external-api-0\" (UID: \"ef5c65bd-86b7-418d-bc80-8df45db1b5a8\") " pod="openstack/glance-default-external-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.218578 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7xgj5\" (UniqueName: \"kubernetes.io/projected/fc0affe7-9d6e-4a96-90de-f8206cc38627-kube-api-access-7xgj5\") pod \"glance-default-internal-api-0\" (UID: \"fc0affe7-9d6e-4a96-90de-f8206cc38627\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.218764 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ef5c65bd-86b7-418d-bc80-8df45db1b5a8-config-data\") pod \"glance-default-external-api-0\" (UID: \"ef5c65bd-86b7-418d-bc80-8df45db1b5a8\") " pod="openstack/glance-default-external-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.218948 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fc0affe7-9d6e-4a96-90de-f8206cc38627-config-data\") pod \"glance-default-internal-api-0\" (UID: \"fc0affe7-9d6e-4a96-90de-f8206cc38627\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.218991 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc0affe7-9d6e-4a96-90de-f8206cc38627-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"fc0affe7-9d6e-4a96-90de-f8206cc38627\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 
10:06:18.219015 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ef5c65bd-86b7-418d-bc80-8df45db1b5a8-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"ef5c65bd-86b7-418d-bc80-8df45db1b5a8\") " pod="openstack/glance-default-external-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.219032 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"fc0affe7-9d6e-4a96-90de-f8206cc38627\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.219058 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef5c65bd-86b7-418d-bc80-8df45db1b5a8-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"ef5c65bd-86b7-418d-bc80-8df45db1b5a8\") " pod="openstack/glance-default-external-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.219081 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zm29f\" (UniqueName: \"kubernetes.io/projected/ef5c65bd-86b7-418d-bc80-8df45db1b5a8-kube-api-access-zm29f\") pod \"glance-default-external-api-0\" (UID: \"ef5c65bd-86b7-418d-bc80-8df45db1b5a8\") " pod="openstack/glance-default-external-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.219102 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fc0affe7-9d6e-4a96-90de-f8206cc38627-scripts\") pod \"glance-default-internal-api-0\" (UID: \"fc0affe7-9d6e-4a96-90de-f8206cc38627\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.219123 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ef5c65bd-86b7-418d-bc80-8df45db1b5a8-logs\") pod \"glance-default-external-api-0\" (UID: \"ef5c65bd-86b7-418d-bc80-8df45db1b5a8\") " pod="openstack/glance-default-external-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.219225 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fc0affe7-9d6e-4a96-90de-f8206cc38627-logs\") pod \"glance-default-internal-api-0\" (UID: \"fc0affe7-9d6e-4a96-90de-f8206cc38627\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.219665 4769 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"ef5c65bd-86b7-418d-bc80-8df45db1b5a8\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/glance-default-external-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.220337 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ef5c65bd-86b7-418d-bc80-8df45db1b5a8-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"ef5c65bd-86b7-418d-bc80-8df45db1b5a8\") " pod="openstack/glance-default-external-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.222040 4769 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ef5c65bd-86b7-418d-bc80-8df45db1b5a8-logs\") pod \"glance-default-external-api-0\" (UID: \"ef5c65bd-86b7-418d-bc80-8df45db1b5a8\") " pod="openstack/glance-default-external-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.228513 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ef5c65bd-86b7-418d-bc80-8df45db1b5a8-scripts\") pod \"glance-default-external-api-0\" (UID: \"ef5c65bd-86b7-418d-bc80-8df45db1b5a8\") " pod="openstack/glance-default-external-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.230554 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef5c65bd-86b7-418d-bc80-8df45db1b5a8-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"ef5c65bd-86b7-418d-bc80-8df45db1b5a8\") " pod="openstack/glance-default-external-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.253333 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zm29f\" (UniqueName: \"kubernetes.io/projected/ef5c65bd-86b7-418d-bc80-8df45db1b5a8-kube-api-access-zm29f\") pod \"glance-default-external-api-0\" (UID: \"ef5c65bd-86b7-418d-bc80-8df45db1b5a8\") " pod="openstack/glance-default-external-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.255239 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ef5c65bd-86b7-418d-bc80-8df45db1b5a8-config-data\") pod \"glance-default-external-api-0\" (UID: \"ef5c65bd-86b7-418d-bc80-8df45db1b5a8\") " pod="openstack/glance-default-external-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.255462 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"ef5c65bd-86b7-418d-bc80-8df45db1b5a8\") " pod="openstack/glance-default-external-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.264990 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fa5947eb-7a68-40d9-8ea3-1dd6719dee8d" path="/var/lib/kubelet/pods/fa5947eb-7a68-40d9-8ea3-1dd6719dee8d/volumes" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.321769 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fc0affe7-9d6e-4a96-90de-f8206cc38627-logs\") pod \"glance-default-internal-api-0\" (UID: \"fc0affe7-9d6e-4a96-90de-f8206cc38627\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.321899 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fc0affe7-9d6e-4a96-90de-f8206cc38627-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"fc0affe7-9d6e-4a96-90de-f8206cc38627\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.321930 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7xgj5\" (UniqueName: \"kubernetes.io/projected/fc0affe7-9d6e-4a96-90de-f8206cc38627-kube-api-access-7xgj5\") pod \"glance-default-internal-api-0\" (UID: \"fc0affe7-9d6e-4a96-90de-f8206cc38627\") " 
pod="openstack/glance-default-internal-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.322036 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fc0affe7-9d6e-4a96-90de-f8206cc38627-config-data\") pod \"glance-default-internal-api-0\" (UID: \"fc0affe7-9d6e-4a96-90de-f8206cc38627\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.322058 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc0affe7-9d6e-4a96-90de-f8206cc38627-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"fc0affe7-9d6e-4a96-90de-f8206cc38627\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.322079 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"fc0affe7-9d6e-4a96-90de-f8206cc38627\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.322104 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fc0affe7-9d6e-4a96-90de-f8206cc38627-scripts\") pod \"glance-default-internal-api-0\" (UID: \"fc0affe7-9d6e-4a96-90de-f8206cc38627\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.322694 4769 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"fc0affe7-9d6e-4a96-90de-f8206cc38627\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-internal-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.323279 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fc0affe7-9d6e-4a96-90de-f8206cc38627-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"fc0affe7-9d6e-4a96-90de-f8206cc38627\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.327747 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fc0affe7-9d6e-4a96-90de-f8206cc38627-logs\") pod \"glance-default-internal-api-0\" (UID: \"fc0affe7-9d6e-4a96-90de-f8206cc38627\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.343170 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fc0affe7-9d6e-4a96-90de-f8206cc38627-scripts\") pod \"glance-default-internal-api-0\" (UID: \"fc0affe7-9d6e-4a96-90de-f8206cc38627\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.345691 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7xgj5\" (UniqueName: \"kubernetes.io/projected/fc0affe7-9d6e-4a96-90de-f8206cc38627-kube-api-access-7xgj5\") pod \"glance-default-internal-api-0\" (UID: \"fc0affe7-9d6e-4a96-90de-f8206cc38627\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.346335 4769 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fc0affe7-9d6e-4a96-90de-f8206cc38627-config-data\") pod \"glance-default-internal-api-0\" (UID: \"fc0affe7-9d6e-4a96-90de-f8206cc38627\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.353971 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc0affe7-9d6e-4a96-90de-f8206cc38627-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"fc0affe7-9d6e-4a96-90de-f8206cc38627\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.374669 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"fc0affe7-9d6e-4a96-90de-f8206cc38627\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.399264 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 10:06:18 crc kubenswrapper[4769]: I1125 10:06:18.505946 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 10:06:20 crc kubenswrapper[4769]: I1125 10:06:20.403419 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 10:06:20 crc kubenswrapper[4769]: I1125 10:06:20.480791 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 10:06:22 crc kubenswrapper[4769]: I1125 10:06:22.244189 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-2tlt4" podUID="0dffefd7-a278-4acd-87f4-0b1585dfb869" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.150:5353: connect: connection refused" Nov 25 10:06:22 crc kubenswrapper[4769]: I1125 10:06:22.248375 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-698758b865-2tlt4" Nov 25 10:06:22 crc kubenswrapper[4769]: I1125 10:06:22.289989 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:06:22 crc kubenswrapper[4769]: I1125 10:06:22.290038 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:06:24 crc kubenswrapper[4769]: I1125 10:06:24.164223 4769 generic.go:334] "Generic (PLEG): container finished" podID="5bb7cfa5-1e81-4d93-a15a-77260adec0d3" containerID="b1f251e19bc2fedf163c481271d36edac389467986138a068d966dfff510ee10" exitCode=0 Nov 25 10:06:24 crc kubenswrapper[4769]: I1125 10:06:24.164305 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-jrgf9" 
event={"ID":"5bb7cfa5-1e81-4d93-a15a-77260adec0d3","Type":"ContainerDied","Data":"b1f251e19bc2fedf163c481271d36edac389467986138a068d966dfff510ee10"} Nov 25 10:06:27 crc kubenswrapper[4769]: I1125 10:06:27.239360 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-2tlt4" podUID="0dffefd7-a278-4acd-87f4-0b1585dfb869" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.150:5353: connect: connection refused" Nov 25 10:06:30 crc kubenswrapper[4769]: E1125 10:06:30.083621 4769 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified" Nov 25 10:06:30 crc kubenswrapper[4769]: E1125 10:06:30.084310 4769 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5m4h2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-5tt9b_openstack(639faafa-26cf-4b2e-831b-bc95e327cb3b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 10:06:30 crc kubenswrapper[4769]: E1125 10:06:30.085466 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/heat-db-sync-5tt9b" podUID="639faafa-26cf-4b2e-831b-bc95e327cb3b" Nov 25 10:06:30 crc kubenswrapper[4769]: I1125 10:06:30.173085 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-jrgf9" Nov 25 10:06:30 crc kubenswrapper[4769]: I1125 10:06:30.264802 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rfwkc\" (UniqueName: \"kubernetes.io/projected/5bb7cfa5-1e81-4d93-a15a-77260adec0d3-kube-api-access-rfwkc\") pod \"5bb7cfa5-1e81-4d93-a15a-77260adec0d3\" (UID: \"5bb7cfa5-1e81-4d93-a15a-77260adec0d3\") " Nov 25 10:06:30 crc kubenswrapper[4769]: I1125 10:06:30.264903 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5bb7cfa5-1e81-4d93-a15a-77260adec0d3-combined-ca-bundle\") pod \"5bb7cfa5-1e81-4d93-a15a-77260adec0d3\" (UID: \"5bb7cfa5-1e81-4d93-a15a-77260adec0d3\") " Nov 25 10:06:30 crc kubenswrapper[4769]: I1125 10:06:30.264958 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/5bb7cfa5-1e81-4d93-a15a-77260adec0d3-config\") pod \"5bb7cfa5-1e81-4d93-a15a-77260adec0d3\" (UID: \"5bb7cfa5-1e81-4d93-a15a-77260adec0d3\") " Nov 25 10:06:30 crc kubenswrapper[4769]: I1125 10:06:30.268313 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-jrgf9" event={"ID":"5bb7cfa5-1e81-4d93-a15a-77260adec0d3","Type":"ContainerDied","Data":"6323a9b4b8af0f4d7206cd9139566b5555a2aeb0771490329ed8c81d536fdf66"} Nov 25 10:06:30 crc kubenswrapper[4769]: I1125 10:06:30.268355 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-jrgf9" Nov 25 10:06:30 crc kubenswrapper[4769]: I1125 10:06:30.268362 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6323a9b4b8af0f4d7206cd9139566b5555a2aeb0771490329ed8c81d536fdf66" Nov 25 10:06:30 crc kubenswrapper[4769]: E1125 10:06:30.282242 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified\\\"\"" pod="openstack/heat-db-sync-5tt9b" podUID="639faafa-26cf-4b2e-831b-bc95e327cb3b" Nov 25 10:06:30 crc kubenswrapper[4769]: I1125 10:06:30.282411 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5bb7cfa5-1e81-4d93-a15a-77260adec0d3-kube-api-access-rfwkc" (OuterVolumeSpecName: "kube-api-access-rfwkc") pod "5bb7cfa5-1e81-4d93-a15a-77260adec0d3" (UID: "5bb7cfa5-1e81-4d93-a15a-77260adec0d3"). InnerVolumeSpecName "kube-api-access-rfwkc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:06:30 crc kubenswrapper[4769]: I1125 10:06:30.303601 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5bb7cfa5-1e81-4d93-a15a-77260adec0d3-config" (OuterVolumeSpecName: "config") pod "5bb7cfa5-1e81-4d93-a15a-77260adec0d3" (UID: "5bb7cfa5-1e81-4d93-a15a-77260adec0d3"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:06:30 crc kubenswrapper[4769]: I1125 10:06:30.307112 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5bb7cfa5-1e81-4d93-a15a-77260adec0d3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5bb7cfa5-1e81-4d93-a15a-77260adec0d3" (UID: "5bb7cfa5-1e81-4d93-a15a-77260adec0d3"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:06:30 crc kubenswrapper[4769]: I1125 10:06:30.373344 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rfwkc\" (UniqueName: \"kubernetes.io/projected/5bb7cfa5-1e81-4d93-a15a-77260adec0d3-kube-api-access-rfwkc\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:30 crc kubenswrapper[4769]: I1125 10:06:30.373405 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5bb7cfa5-1e81-4d93-a15a-77260adec0d3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:30 crc kubenswrapper[4769]: I1125 10:06:30.373419 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/5bb7cfa5-1e81-4d93-a15a-77260adec0d3-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.470156 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f84976bdf-f7s6r"] Nov 25 10:06:31 crc kubenswrapper[4769]: E1125 10:06:31.503848 4769 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Nov 25 10:06:31 crc kubenswrapper[4769]: E1125 10:06:31.504421 4769 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9rbnp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,Seccomp
Profile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-lfvzs_openstack(ed79e867-002b-4591-b757-0410b73a43ef): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 10:06:31 crc kubenswrapper[4769]: E1125 10:06:31.507064 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-lfvzs" podUID="ed79e867-002b-4591-b757-0410b73a43ef" Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.543363 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-fb745b69-rp7dd"] Nov 25 10:06:31 crc kubenswrapper[4769]: E1125 10:06:31.544169 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5bb7cfa5-1e81-4d93-a15a-77260adec0d3" containerName="neutron-db-sync" Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.544197 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="5bb7cfa5-1e81-4d93-a15a-77260adec0d3" containerName="neutron-db-sync" Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.544476 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="5bb7cfa5-1e81-4d93-a15a-77260adec0d3" containerName="neutron-db-sync" Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.546157 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fb745b69-rp7dd" Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.560817 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-fb745b69-rp7dd"] Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.595152 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-75dfc45cc4-m9wmn"] Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.603939 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-75dfc45cc4-m9wmn" Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.604316 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3322c8d5-8fb0-4c40-8967-e20d2232a127-config\") pod \"dnsmasq-dns-fb745b69-rp7dd\" (UID: \"3322c8d5-8fb0-4c40-8967-e20d2232a127\") " pod="openstack/dnsmasq-dns-fb745b69-rp7dd" Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.604391 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3322c8d5-8fb0-4c40-8967-e20d2232a127-ovsdbserver-nb\") pod \"dnsmasq-dns-fb745b69-rp7dd\" (UID: \"3322c8d5-8fb0-4c40-8967-e20d2232a127\") " pod="openstack/dnsmasq-dns-fb745b69-rp7dd" Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.604428 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4lwbz\" (UniqueName: \"kubernetes.io/projected/3322c8d5-8fb0-4c40-8967-e20d2232a127-kube-api-access-4lwbz\") pod \"dnsmasq-dns-fb745b69-rp7dd\" (UID: \"3322c8d5-8fb0-4c40-8967-e20d2232a127\") " pod="openstack/dnsmasq-dns-fb745b69-rp7dd" Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.604531 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3322c8d5-8fb0-4c40-8967-e20d2232a127-dns-svc\") pod \"dnsmasq-dns-fb745b69-rp7dd\" (UID: \"3322c8d5-8fb0-4c40-8967-e20d2232a127\") " pod="openstack/dnsmasq-dns-fb745b69-rp7dd" Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.604622 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3322c8d5-8fb0-4c40-8967-e20d2232a127-ovsdbserver-sb\") pod \"dnsmasq-dns-fb745b69-rp7dd\" (UID: \"3322c8d5-8fb0-4c40-8967-e20d2232a127\") " pod="openstack/dnsmasq-dns-fb745b69-rp7dd" Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.612864 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.613207 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-zhplb" Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.613232 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.613235 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.616115 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-75dfc45cc4-m9wmn"] Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.706720 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3322c8d5-8fb0-4c40-8967-e20d2232a127-config\") pod \"dnsmasq-dns-fb745b69-rp7dd\" (UID: \"3322c8d5-8fb0-4c40-8967-e20d2232a127\") " pod="openstack/dnsmasq-dns-fb745b69-rp7dd" Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.706770 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3322c8d5-8fb0-4c40-8967-e20d2232a127-ovsdbserver-nb\") pod \"dnsmasq-dns-fb745b69-rp7dd\" 
(UID: \"3322c8d5-8fb0-4c40-8967-e20d2232a127\") " pod="openstack/dnsmasq-dns-fb745b69-rp7dd" Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.706799 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4lwbz\" (UniqueName: \"kubernetes.io/projected/3322c8d5-8fb0-4c40-8967-e20d2232a127-kube-api-access-4lwbz\") pod \"dnsmasq-dns-fb745b69-rp7dd\" (UID: \"3322c8d5-8fb0-4c40-8967-e20d2232a127\") " pod="openstack/dnsmasq-dns-fb745b69-rp7dd" Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.706826 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3259d7a5-de25-4fa9-a5f8-36fc56dd733f-combined-ca-bundle\") pod \"neutron-75dfc45cc4-m9wmn\" (UID: \"3259d7a5-de25-4fa9-a5f8-36fc56dd733f\") " pod="openstack/neutron-75dfc45cc4-m9wmn" Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.706878 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/3259d7a5-de25-4fa9-a5f8-36fc56dd733f-ovndb-tls-certs\") pod \"neutron-75dfc45cc4-m9wmn\" (UID: \"3259d7a5-de25-4fa9-a5f8-36fc56dd733f\") " pod="openstack/neutron-75dfc45cc4-m9wmn" Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.706941 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3322c8d5-8fb0-4c40-8967-e20d2232a127-dns-svc\") pod \"dnsmasq-dns-fb745b69-rp7dd\" (UID: \"3322c8d5-8fb0-4c40-8967-e20d2232a127\") " pod="openstack/dnsmasq-dns-fb745b69-rp7dd" Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.706980 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/3259d7a5-de25-4fa9-a5f8-36fc56dd733f-config\") pod \"neutron-75dfc45cc4-m9wmn\" (UID: \"3259d7a5-de25-4fa9-a5f8-36fc56dd733f\") " pod="openstack/neutron-75dfc45cc4-m9wmn" Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.707073 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3322c8d5-8fb0-4c40-8967-e20d2232a127-ovsdbserver-sb\") pod \"dnsmasq-dns-fb745b69-rp7dd\" (UID: \"3322c8d5-8fb0-4c40-8967-e20d2232a127\") " pod="openstack/dnsmasq-dns-fb745b69-rp7dd" Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.707123 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k2jm7\" (UniqueName: \"kubernetes.io/projected/3259d7a5-de25-4fa9-a5f8-36fc56dd733f-kube-api-access-k2jm7\") pod \"neutron-75dfc45cc4-m9wmn\" (UID: \"3259d7a5-de25-4fa9-a5f8-36fc56dd733f\") " pod="openstack/neutron-75dfc45cc4-m9wmn" Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.707218 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/3259d7a5-de25-4fa9-a5f8-36fc56dd733f-httpd-config\") pod \"neutron-75dfc45cc4-m9wmn\" (UID: \"3259d7a5-de25-4fa9-a5f8-36fc56dd733f\") " pod="openstack/neutron-75dfc45cc4-m9wmn" Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.708427 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3322c8d5-8fb0-4c40-8967-e20d2232a127-config\") pod \"dnsmasq-dns-fb745b69-rp7dd\" (UID: 
\"3322c8d5-8fb0-4c40-8967-e20d2232a127\") " pod="openstack/dnsmasq-dns-fb745b69-rp7dd" Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.708488 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3322c8d5-8fb0-4c40-8967-e20d2232a127-ovsdbserver-nb\") pod \"dnsmasq-dns-fb745b69-rp7dd\" (UID: \"3322c8d5-8fb0-4c40-8967-e20d2232a127\") " pod="openstack/dnsmasq-dns-fb745b69-rp7dd" Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.708726 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3322c8d5-8fb0-4c40-8967-e20d2232a127-dns-svc\") pod \"dnsmasq-dns-fb745b69-rp7dd\" (UID: \"3322c8d5-8fb0-4c40-8967-e20d2232a127\") " pod="openstack/dnsmasq-dns-fb745b69-rp7dd" Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.709073 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3322c8d5-8fb0-4c40-8967-e20d2232a127-ovsdbserver-sb\") pod \"dnsmasq-dns-fb745b69-rp7dd\" (UID: \"3322c8d5-8fb0-4c40-8967-e20d2232a127\") " pod="openstack/dnsmasq-dns-fb745b69-rp7dd" Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.761685 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4lwbz\" (UniqueName: \"kubernetes.io/projected/3322c8d5-8fb0-4c40-8967-e20d2232a127-kube-api-access-4lwbz\") pod \"dnsmasq-dns-fb745b69-rp7dd\" (UID: \"3322c8d5-8fb0-4c40-8967-e20d2232a127\") " pod="openstack/dnsmasq-dns-fb745b69-rp7dd" Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.809264 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/3259d7a5-de25-4fa9-a5f8-36fc56dd733f-ovndb-tls-certs\") pod \"neutron-75dfc45cc4-m9wmn\" (UID: \"3259d7a5-de25-4fa9-a5f8-36fc56dd733f\") " pod="openstack/neutron-75dfc45cc4-m9wmn" Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.809345 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/3259d7a5-de25-4fa9-a5f8-36fc56dd733f-config\") pod \"neutron-75dfc45cc4-m9wmn\" (UID: \"3259d7a5-de25-4fa9-a5f8-36fc56dd733f\") " pod="openstack/neutron-75dfc45cc4-m9wmn" Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.809427 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k2jm7\" (UniqueName: \"kubernetes.io/projected/3259d7a5-de25-4fa9-a5f8-36fc56dd733f-kube-api-access-k2jm7\") pod \"neutron-75dfc45cc4-m9wmn\" (UID: \"3259d7a5-de25-4fa9-a5f8-36fc56dd733f\") " pod="openstack/neutron-75dfc45cc4-m9wmn" Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.809471 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/3259d7a5-de25-4fa9-a5f8-36fc56dd733f-httpd-config\") pod \"neutron-75dfc45cc4-m9wmn\" (UID: \"3259d7a5-de25-4fa9-a5f8-36fc56dd733f\") " pod="openstack/neutron-75dfc45cc4-m9wmn" Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.809573 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3259d7a5-de25-4fa9-a5f8-36fc56dd733f-combined-ca-bundle\") pod \"neutron-75dfc45cc4-m9wmn\" (UID: \"3259d7a5-de25-4fa9-a5f8-36fc56dd733f\") " pod="openstack/neutron-75dfc45cc4-m9wmn" Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.814503 
4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/3259d7a5-de25-4fa9-a5f8-36fc56dd733f-ovndb-tls-certs\") pod \"neutron-75dfc45cc4-m9wmn\" (UID: \"3259d7a5-de25-4fa9-a5f8-36fc56dd733f\") " pod="openstack/neutron-75dfc45cc4-m9wmn" Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.817079 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/3259d7a5-de25-4fa9-a5f8-36fc56dd733f-config\") pod \"neutron-75dfc45cc4-m9wmn\" (UID: \"3259d7a5-de25-4fa9-a5f8-36fc56dd733f\") " pod="openstack/neutron-75dfc45cc4-m9wmn" Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.819802 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/3259d7a5-de25-4fa9-a5f8-36fc56dd733f-httpd-config\") pod \"neutron-75dfc45cc4-m9wmn\" (UID: \"3259d7a5-de25-4fa9-a5f8-36fc56dd733f\") " pod="openstack/neutron-75dfc45cc4-m9wmn" Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.819989 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3259d7a5-de25-4fa9-a5f8-36fc56dd733f-combined-ca-bundle\") pod \"neutron-75dfc45cc4-m9wmn\" (UID: \"3259d7a5-de25-4fa9-a5f8-36fc56dd733f\") " pod="openstack/neutron-75dfc45cc4-m9wmn" Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.832621 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k2jm7\" (UniqueName: \"kubernetes.io/projected/3259d7a5-de25-4fa9-a5f8-36fc56dd733f-kube-api-access-k2jm7\") pod \"neutron-75dfc45cc4-m9wmn\" (UID: \"3259d7a5-de25-4fa9-a5f8-36fc56dd733f\") " pod="openstack/neutron-75dfc45cc4-m9wmn" Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.892703 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fb745b69-rp7dd" Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.929461 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-2tlt4" Nov 25 10:06:31 crc kubenswrapper[4769]: I1125 10:06:31.941142 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-75dfc45cc4-m9wmn" Nov 25 10:06:32 crc kubenswrapper[4769]: I1125 10:06:32.013820 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wg6b8\" (UniqueName: \"kubernetes.io/projected/0dffefd7-a278-4acd-87f4-0b1585dfb869-kube-api-access-wg6b8\") pod \"0dffefd7-a278-4acd-87f4-0b1585dfb869\" (UID: \"0dffefd7-a278-4acd-87f4-0b1585dfb869\") " Nov 25 10:06:32 crc kubenswrapper[4769]: I1125 10:06:32.013949 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0dffefd7-a278-4acd-87f4-0b1585dfb869-config\") pod \"0dffefd7-a278-4acd-87f4-0b1585dfb869\" (UID: \"0dffefd7-a278-4acd-87f4-0b1585dfb869\") " Nov 25 10:06:32 crc kubenswrapper[4769]: I1125 10:06:32.014045 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0dffefd7-a278-4acd-87f4-0b1585dfb869-dns-svc\") pod \"0dffefd7-a278-4acd-87f4-0b1585dfb869\" (UID: \"0dffefd7-a278-4acd-87f4-0b1585dfb869\") " Nov 25 10:06:32 crc kubenswrapper[4769]: I1125 10:06:32.014097 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0dffefd7-a278-4acd-87f4-0b1585dfb869-ovsdbserver-sb\") pod \"0dffefd7-a278-4acd-87f4-0b1585dfb869\" (UID: \"0dffefd7-a278-4acd-87f4-0b1585dfb869\") " Nov 25 10:06:32 crc kubenswrapper[4769]: I1125 10:06:32.014184 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0dffefd7-a278-4acd-87f4-0b1585dfb869-ovsdbserver-nb\") pod \"0dffefd7-a278-4acd-87f4-0b1585dfb869\" (UID: \"0dffefd7-a278-4acd-87f4-0b1585dfb869\") " Nov 25 10:06:32 crc kubenswrapper[4769]: I1125 10:06:32.036370 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0dffefd7-a278-4acd-87f4-0b1585dfb869-kube-api-access-wg6b8" (OuterVolumeSpecName: "kube-api-access-wg6b8") pod "0dffefd7-a278-4acd-87f4-0b1585dfb869" (UID: "0dffefd7-a278-4acd-87f4-0b1585dfb869"). InnerVolumeSpecName "kube-api-access-wg6b8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:06:32 crc kubenswrapper[4769]: I1125 10:06:32.117179 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wg6b8\" (UniqueName: \"kubernetes.io/projected/0dffefd7-a278-4acd-87f4-0b1585dfb869-kube-api-access-wg6b8\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:32 crc kubenswrapper[4769]: I1125 10:06:32.141154 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0dffefd7-a278-4acd-87f4-0b1585dfb869-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "0dffefd7-a278-4acd-87f4-0b1585dfb869" (UID: "0dffefd7-a278-4acd-87f4-0b1585dfb869"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:06:32 crc kubenswrapper[4769]: I1125 10:06:32.141207 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0dffefd7-a278-4acd-87f4-0b1585dfb869-config" (OuterVolumeSpecName: "config") pod "0dffefd7-a278-4acd-87f4-0b1585dfb869" (UID: "0dffefd7-a278-4acd-87f4-0b1585dfb869"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:06:32 crc kubenswrapper[4769]: I1125 10:06:32.145121 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0dffefd7-a278-4acd-87f4-0b1585dfb869-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "0dffefd7-a278-4acd-87f4-0b1585dfb869" (UID: "0dffefd7-a278-4acd-87f4-0b1585dfb869"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:06:32 crc kubenswrapper[4769]: I1125 10:06:32.154676 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0dffefd7-a278-4acd-87f4-0b1585dfb869-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0dffefd7-a278-4acd-87f4-0b1585dfb869" (UID: "0dffefd7-a278-4acd-87f4-0b1585dfb869"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:06:32 crc kubenswrapper[4769]: I1125 10:06:32.219307 4769 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0dffefd7-a278-4acd-87f4-0b1585dfb869-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:32 crc kubenswrapper[4769]: I1125 10:06:32.219340 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0dffefd7-a278-4acd-87f4-0b1585dfb869-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:32 crc kubenswrapper[4769]: I1125 10:06:32.219351 4769 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0dffefd7-a278-4acd-87f4-0b1585dfb869-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:32 crc kubenswrapper[4769]: I1125 10:06:32.219359 4769 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0dffefd7-a278-4acd-87f4-0b1585dfb869-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:32 crc kubenswrapper[4769]: I1125 10:06:32.343871 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-2tlt4" Nov 25 10:06:32 crc kubenswrapper[4769]: I1125 10:06:32.344009 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-2tlt4" event={"ID":"0dffefd7-a278-4acd-87f4-0b1585dfb869","Type":"ContainerDied","Data":"1618b973e812b33e4c98117fc6a2c11d601c0c164ea9801edbe75661cdbaf891"} Nov 25 10:06:32 crc kubenswrapper[4769]: I1125 10:06:32.344228 4769 scope.go:117] "RemoveContainer" containerID="b3f62375ae317683f5b0dd0fa4c637d50ca7f4926d68f49b5bdb5aa536893a5a" Nov 25 10:06:32 crc kubenswrapper[4769]: E1125 10:06:32.350870 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-lfvzs" podUID="ed79e867-002b-4591-b757-0410b73a43ef" Nov 25 10:06:32 crc kubenswrapper[4769]: I1125 10:06:32.405822 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-698758b865-2tlt4"] Nov 25 10:06:32 crc kubenswrapper[4769]: I1125 10:06:32.415571 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-698758b865-2tlt4"] Nov 25 10:06:32 crc kubenswrapper[4769]: I1125 10:06:32.870129 4769 scope.go:117] "RemoveContainer" containerID="b23b88a6ed53c4c1070c204f3c577cb48da6a6379782c8d55f39b3e6c9e206a0" Nov 25 10:06:33 crc kubenswrapper[4769]: I1125 10:06:33.713850 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f84976bdf-f7s6r"] Nov 25 10:06:33 crc kubenswrapper[4769]: I1125 10:06:33.748412 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-qkr9g"] Nov 25 10:06:33 crc kubenswrapper[4769]: W1125 10:06:33.767135 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podee29c33a_80a8_4f26_81bd_1af50bbbaabb.slice/crio-5458feb12a34eecf34938bcf7724589bc9dfd9b423c4424680d314eba27ecf92 WatchSource:0}: Error finding container 5458feb12a34eecf34938bcf7724589bc9dfd9b423c4424680d314eba27ecf92: Status 404 returned error can't find the container with id 5458feb12a34eecf34938bcf7724589bc9dfd9b423c4424680d314eba27ecf92 Nov 25 10:06:33 crc kubenswrapper[4769]: I1125 10:06:33.965026 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 10:06:34 crc kubenswrapper[4769]: I1125 10:06:34.007829 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-fb745b69-rp7dd"] Nov 25 10:06:34 crc kubenswrapper[4769]: I1125 10:06:34.018951 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 10:06:34 crc kubenswrapper[4769]: I1125 10:06:34.091590 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-75dfc45cc4-m9wmn"] Nov 25 10:06:34 crc kubenswrapper[4769]: I1125 10:06:34.281378 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0dffefd7-a278-4acd-87f4-0b1585dfb869" path="/var/lib/kubelet/pods/0dffefd7-a278-4acd-87f4-0b1585dfb869/volumes" Nov 25 10:06:34 crc kubenswrapper[4769]: I1125 10:06:34.470543 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"f7987cb4-5485-438f-bc01-c69e509b81a6","Type":"ContainerStarted","Data":"b613ac60b3984dd01b01f7a85ba86befb7b38a950f70f26f19f828ec16516b16"} Nov 25 10:06:34 crc 
kubenswrapper[4769]: I1125 10:06:34.470856 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"f7987cb4-5485-438f-bc01-c69e509b81a6","Type":"ContainerStarted","Data":"9756956c176ba55fc105f2505a78738e0ac523796c552345d686f729f25dc6dd"} Nov 25 10:06:34 crc kubenswrapper[4769]: I1125 10:06:34.478935 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-9zbdp" event={"ID":"29edc9fc-ffe1-4511-9f89-9d0795f063cb","Type":"ContainerStarted","Data":"09a8800fabbe0b8cff7aa77a649828ff9cb1df93447412766ac0f90f2cee23c2"} Nov 25 10:06:34 crc kubenswrapper[4769]: I1125 10:06:34.488166 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fc0affe7-9d6e-4a96-90de-f8206cc38627","Type":"ContainerStarted","Data":"e923c8eb4c22a2913ee61530bea25d09a29dc94f6de8497d20efd3e29e56e0fb"} Nov 25 10:06:34 crc kubenswrapper[4769]: I1125 10:06:34.492165 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3373fd6b-9a0f-4268-8476-382740118f35","Type":"ContainerStarted","Data":"d936a4df3ae46efea8ababca1e573f5db3891f5502bddf9c59cdda34ccccdc0e"} Nov 25 10:06:34 crc kubenswrapper[4769]: I1125 10:06:34.494292 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fb745b69-rp7dd" event={"ID":"3322c8d5-8fb0-4c40-8967-e20d2232a127","Type":"ContainerStarted","Data":"13f2654de99b4fcf7f1eb8972989548c88e7a0dc893d68076668966157aa8493"} Nov 25 10:06:34 crc kubenswrapper[4769]: I1125 10:06:34.495775 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-75dfc45cc4-m9wmn" event={"ID":"3259d7a5-de25-4fa9-a5f8-36fc56dd733f","Type":"ContainerStarted","Data":"6a4d61afc96f7fb377aa2d042b326a4e89f84b59a0cbb597e08d0f7308b1d289"} Nov 25 10:06:34 crc kubenswrapper[4769]: I1125 10:06:34.511847 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"8ef4c37f-e3dc-4994-92bf-b41e7c215ef1","Type":"ContainerStarted","Data":"304364d3a118d2242dd2e8edfaec2a48f72929a155f5d8bfc98ef6d4b8cf54f8"} Nov 25 10:06:34 crc kubenswrapper[4769]: I1125 10:06:34.531259 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-qkr9g" event={"ID":"ee29c33a-80a8-4f26-81bd-1af50bbbaabb","Type":"ContainerStarted","Data":"c7751374f75eda43b05b2eb51be43fe0c15ca757e586d1a21f51229c42e0f892"} Nov 25 10:06:34 crc kubenswrapper[4769]: I1125 10:06:34.531304 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-qkr9g" event={"ID":"ee29c33a-80a8-4f26-81bd-1af50bbbaabb","Type":"ContainerStarted","Data":"5458feb12a34eecf34938bcf7724589bc9dfd9b423c4424680d314eba27ecf92"} Nov 25 10:06:34 crc kubenswrapper[4769]: I1125 10:06:34.545910 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f84976bdf-f7s6r" event={"ID":"48fc1d3f-c20f-4949-b0d7-5b52cff1efcf","Type":"ContainerStarted","Data":"f1ea5214c7dd221301e4a426f0c6725f6532430e6a189586ae6d315a61b6b69d"} Nov 25 10:06:34 crc kubenswrapper[4769]: I1125 10:06:34.576679 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ef5c65bd-86b7-418d-bc80-8df45db1b5a8","Type":"ContainerStarted","Data":"491e24fa5cb83cd623bfd74b6918be7161d79d59eef54b63566f7e4075d6d1e5"} Nov 25 10:06:34 crc kubenswrapper[4769]: I1125 10:06:34.586375 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-pvsgh" 
event={"ID":"175271fe-6677-49b5-b497-c45ef1816fb7","Type":"ContainerStarted","Data":"978d649392af26e7b658de6ba64a81ab9a59b5f0291633e52cc1e43cea2315f7"} Nov 25 10:06:34 crc kubenswrapper[4769]: I1125 10:06:34.617244 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-qkr9g" podStartSLOduration=18.617222009 podStartE2EDuration="18.617222009s" podCreationTimestamp="2025-11-25 10:06:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:06:34.586617914 +0000 UTC m=+1343.171590227" watchObservedRunningTime="2025-11-25 10:06:34.617222009 +0000 UTC m=+1343.202194322" Nov 25 10:06:34 crc kubenswrapper[4769]: I1125 10:06:34.619607 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-9zbdp" podStartSLOduration=6.975691422 podStartE2EDuration="39.619595573s" podCreationTimestamp="2025-11-25 10:05:55 +0000 UTC" firstStartedPulling="2025-11-25 10:05:59.010190259 +0000 UTC m=+1307.595162572" lastFinishedPulling="2025-11-25 10:06:31.65409441 +0000 UTC m=+1340.239066723" observedRunningTime="2025-11-25 10:06:34.50691543 +0000 UTC m=+1343.091887743" watchObservedRunningTime="2025-11-25 10:06:34.619595573 +0000 UTC m=+1343.204567886" Nov 25 10:06:34 crc kubenswrapper[4769]: I1125 10:06:34.638194 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-pvsgh" podStartSLOduration=8.157327784 podStartE2EDuration="39.638174938s" podCreationTimestamp="2025-11-25 10:05:55 +0000 UTC" firstStartedPulling="2025-11-25 10:05:58.573629437 +0000 UTC m=+1307.158601850" lastFinishedPulling="2025-11-25 10:06:30.054476691 +0000 UTC m=+1338.639449004" observedRunningTime="2025-11-25 10:06:34.613460689 +0000 UTC m=+1343.198433002" watchObservedRunningTime="2025-11-25 10:06:34.638174938 +0000 UTC m=+1343.223147251" Nov 25 10:06:34 crc kubenswrapper[4769]: I1125 10:06:34.864547 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-5c9685d5f7-xbblq"] Nov 25 10:06:34 crc kubenswrapper[4769]: E1125 10:06:34.865458 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0dffefd7-a278-4acd-87f4-0b1585dfb869" containerName="init" Nov 25 10:06:34 crc kubenswrapper[4769]: I1125 10:06:34.866318 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="0dffefd7-a278-4acd-87f4-0b1585dfb869" containerName="init" Nov 25 10:06:34 crc kubenswrapper[4769]: E1125 10:06:34.866433 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0dffefd7-a278-4acd-87f4-0b1585dfb869" containerName="dnsmasq-dns" Nov 25 10:06:34 crc kubenswrapper[4769]: I1125 10:06:34.866599 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="0dffefd7-a278-4acd-87f4-0b1585dfb869" containerName="dnsmasq-dns" Nov 25 10:06:34 crc kubenswrapper[4769]: I1125 10:06:34.930643 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="0dffefd7-a278-4acd-87f4-0b1585dfb869" containerName="dnsmasq-dns" Nov 25 10:06:34 crc kubenswrapper[4769]: I1125 10:06:34.943713 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5c9685d5f7-xbblq"] Nov 25 10:06:34 crc kubenswrapper[4769]: I1125 10:06:34.943861 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5c9685d5f7-xbblq" Nov 25 10:06:34 crc kubenswrapper[4769]: I1125 10:06:34.962253 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Nov 25 10:06:35 crc kubenswrapper[4769]: I1125 10:06:35.002760 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Nov 25 10:06:35 crc kubenswrapper[4769]: I1125 10:06:35.059981 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/bbd368c8-da9f-4504-89e4-ffc922d67c32-config\") pod \"neutron-5c9685d5f7-xbblq\" (UID: \"bbd368c8-da9f-4504-89e4-ffc922d67c32\") " pod="openstack/neutron-5c9685d5f7-xbblq" Nov 25 10:06:35 crc kubenswrapper[4769]: I1125 10:06:35.060063 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bbd368c8-da9f-4504-89e4-ffc922d67c32-combined-ca-bundle\") pod \"neutron-5c9685d5f7-xbblq\" (UID: \"bbd368c8-da9f-4504-89e4-ffc922d67c32\") " pod="openstack/neutron-5c9685d5f7-xbblq" Nov 25 10:06:35 crc kubenswrapper[4769]: I1125 10:06:35.060131 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/bbd368c8-da9f-4504-89e4-ffc922d67c32-ovndb-tls-certs\") pod \"neutron-5c9685d5f7-xbblq\" (UID: \"bbd368c8-da9f-4504-89e4-ffc922d67c32\") " pod="openstack/neutron-5c9685d5f7-xbblq" Nov 25 10:06:35 crc kubenswrapper[4769]: I1125 10:06:35.060182 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qbn2k\" (UniqueName: \"kubernetes.io/projected/bbd368c8-da9f-4504-89e4-ffc922d67c32-kube-api-access-qbn2k\") pod \"neutron-5c9685d5f7-xbblq\" (UID: \"bbd368c8-da9f-4504-89e4-ffc922d67c32\") " pod="openstack/neutron-5c9685d5f7-xbblq" Nov 25 10:06:35 crc kubenswrapper[4769]: I1125 10:06:35.060216 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bbd368c8-da9f-4504-89e4-ffc922d67c32-internal-tls-certs\") pod \"neutron-5c9685d5f7-xbblq\" (UID: \"bbd368c8-da9f-4504-89e4-ffc922d67c32\") " pod="openstack/neutron-5c9685d5f7-xbblq" Nov 25 10:06:35 crc kubenswrapper[4769]: I1125 10:06:35.060245 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/bbd368c8-da9f-4504-89e4-ffc922d67c32-httpd-config\") pod \"neutron-5c9685d5f7-xbblq\" (UID: \"bbd368c8-da9f-4504-89e4-ffc922d67c32\") " pod="openstack/neutron-5c9685d5f7-xbblq" Nov 25 10:06:35 crc kubenswrapper[4769]: I1125 10:06:35.060269 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bbd368c8-da9f-4504-89e4-ffc922d67c32-public-tls-certs\") pod \"neutron-5c9685d5f7-xbblq\" (UID: \"bbd368c8-da9f-4504-89e4-ffc922d67c32\") " pod="openstack/neutron-5c9685d5f7-xbblq" Nov 25 10:06:35 crc kubenswrapper[4769]: I1125 10:06:35.162623 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/bbd368c8-da9f-4504-89e4-ffc922d67c32-ovndb-tls-certs\") pod \"neutron-5c9685d5f7-xbblq\" (UID: \"bbd368c8-da9f-4504-89e4-ffc922d67c32\") " 
pod="openstack/neutron-5c9685d5f7-xbblq" Nov 25 10:06:35 crc kubenswrapper[4769]: I1125 10:06:35.162738 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qbn2k\" (UniqueName: \"kubernetes.io/projected/bbd368c8-da9f-4504-89e4-ffc922d67c32-kube-api-access-qbn2k\") pod \"neutron-5c9685d5f7-xbblq\" (UID: \"bbd368c8-da9f-4504-89e4-ffc922d67c32\") " pod="openstack/neutron-5c9685d5f7-xbblq" Nov 25 10:06:35 crc kubenswrapper[4769]: I1125 10:06:35.162778 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bbd368c8-da9f-4504-89e4-ffc922d67c32-internal-tls-certs\") pod \"neutron-5c9685d5f7-xbblq\" (UID: \"bbd368c8-da9f-4504-89e4-ffc922d67c32\") " pod="openstack/neutron-5c9685d5f7-xbblq" Nov 25 10:06:35 crc kubenswrapper[4769]: I1125 10:06:35.162813 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/bbd368c8-da9f-4504-89e4-ffc922d67c32-httpd-config\") pod \"neutron-5c9685d5f7-xbblq\" (UID: \"bbd368c8-da9f-4504-89e4-ffc922d67c32\") " pod="openstack/neutron-5c9685d5f7-xbblq" Nov 25 10:06:35 crc kubenswrapper[4769]: I1125 10:06:35.162834 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bbd368c8-da9f-4504-89e4-ffc922d67c32-public-tls-certs\") pod \"neutron-5c9685d5f7-xbblq\" (UID: \"bbd368c8-da9f-4504-89e4-ffc922d67c32\") " pod="openstack/neutron-5c9685d5f7-xbblq" Nov 25 10:06:35 crc kubenswrapper[4769]: I1125 10:06:35.162935 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/bbd368c8-da9f-4504-89e4-ffc922d67c32-config\") pod \"neutron-5c9685d5f7-xbblq\" (UID: \"bbd368c8-da9f-4504-89e4-ffc922d67c32\") " pod="openstack/neutron-5c9685d5f7-xbblq" Nov 25 10:06:35 crc kubenswrapper[4769]: I1125 10:06:35.163000 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bbd368c8-da9f-4504-89e4-ffc922d67c32-combined-ca-bundle\") pod \"neutron-5c9685d5f7-xbblq\" (UID: \"bbd368c8-da9f-4504-89e4-ffc922d67c32\") " pod="openstack/neutron-5c9685d5f7-xbblq" Nov 25 10:06:35 crc kubenswrapper[4769]: I1125 10:06:35.169544 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/bbd368c8-da9f-4504-89e4-ffc922d67c32-ovndb-tls-certs\") pod \"neutron-5c9685d5f7-xbblq\" (UID: \"bbd368c8-da9f-4504-89e4-ffc922d67c32\") " pod="openstack/neutron-5c9685d5f7-xbblq" Nov 25 10:06:35 crc kubenswrapper[4769]: I1125 10:06:35.169850 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bbd368c8-da9f-4504-89e4-ffc922d67c32-public-tls-certs\") pod \"neutron-5c9685d5f7-xbblq\" (UID: \"bbd368c8-da9f-4504-89e4-ffc922d67c32\") " pod="openstack/neutron-5c9685d5f7-xbblq" Nov 25 10:06:35 crc kubenswrapper[4769]: I1125 10:06:35.173067 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bbd368c8-da9f-4504-89e4-ffc922d67c32-internal-tls-certs\") pod \"neutron-5c9685d5f7-xbblq\" (UID: \"bbd368c8-da9f-4504-89e4-ffc922d67c32\") " pod="openstack/neutron-5c9685d5f7-xbblq" Nov 25 10:06:35 crc kubenswrapper[4769]: I1125 10:06:35.173629 4769 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/bbd368c8-da9f-4504-89e4-ffc922d67c32-httpd-config\") pod \"neutron-5c9685d5f7-xbblq\" (UID: \"bbd368c8-da9f-4504-89e4-ffc922d67c32\") " pod="openstack/neutron-5c9685d5f7-xbblq" Nov 25 10:06:35 crc kubenswrapper[4769]: I1125 10:06:35.175295 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bbd368c8-da9f-4504-89e4-ffc922d67c32-combined-ca-bundle\") pod \"neutron-5c9685d5f7-xbblq\" (UID: \"bbd368c8-da9f-4504-89e4-ffc922d67c32\") " pod="openstack/neutron-5c9685d5f7-xbblq" Nov 25 10:06:35 crc kubenswrapper[4769]: I1125 10:06:35.176017 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/bbd368c8-da9f-4504-89e4-ffc922d67c32-config\") pod \"neutron-5c9685d5f7-xbblq\" (UID: \"bbd368c8-da9f-4504-89e4-ffc922d67c32\") " pod="openstack/neutron-5c9685d5f7-xbblq" Nov 25 10:06:35 crc kubenswrapper[4769]: I1125 10:06:35.286547 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qbn2k\" (UniqueName: \"kubernetes.io/projected/bbd368c8-da9f-4504-89e4-ffc922d67c32-kube-api-access-qbn2k\") pod \"neutron-5c9685d5f7-xbblq\" (UID: \"bbd368c8-da9f-4504-89e4-ffc922d67c32\") " pod="openstack/neutron-5c9685d5f7-xbblq" Nov 25 10:06:35 crc kubenswrapper[4769]: I1125 10:06:35.574337 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5c9685d5f7-xbblq" Nov 25 10:06:35 crc kubenswrapper[4769]: I1125 10:06:35.602419 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-75dfc45cc4-m9wmn" event={"ID":"3259d7a5-de25-4fa9-a5f8-36fc56dd733f","Type":"ContainerStarted","Data":"411b0640f97ec6f18130ff185a231014fd849e7b6725898c129a5f3fbf326f82"} Nov 25 10:06:35 crc kubenswrapper[4769]: I1125 10:06:35.604731 4769 generic.go:334] "Generic (PLEG): container finished" podID="3322c8d5-8fb0-4c40-8967-e20d2232a127" containerID="102271a9bdb006e90ce1d9e16539600b102d1e5bbd3774642da00123c70da811" exitCode=0 Nov 25 10:06:35 crc kubenswrapper[4769]: I1125 10:06:35.604771 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fb745b69-rp7dd" event={"ID":"3322c8d5-8fb0-4c40-8967-e20d2232a127","Type":"ContainerDied","Data":"102271a9bdb006e90ce1d9e16539600b102d1e5bbd3774642da00123c70da811"} Nov 25 10:06:35 crc kubenswrapper[4769]: I1125 10:06:35.610830 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"f7987cb4-5485-438f-bc01-c69e509b81a6","Type":"ContainerStarted","Data":"703c7a655aaaa99ae1c5ae4e98a163cb52435243ba1235df5698d49cb52746b1"} Nov 25 10:06:35 crc kubenswrapper[4769]: I1125 10:06:35.612789 4769 generic.go:334] "Generic (PLEG): container finished" podID="48fc1d3f-c20f-4949-b0d7-5b52cff1efcf" containerID="71f8ca34e7440c116ee32dc02c10b6feb26bc52f918a2a08a3ce6dea4b74924c" exitCode=0 Nov 25 10:06:35 crc kubenswrapper[4769]: I1125 10:06:35.613790 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f84976bdf-f7s6r" event={"ID":"48fc1d3f-c20f-4949-b0d7-5b52cff1efcf","Type":"ContainerDied","Data":"71f8ca34e7440c116ee32dc02c10b6feb26bc52f918a2a08a3ce6dea4b74924c"} Nov 25 10:06:36 crc kubenswrapper[4769]: I1125 10:06:36.458935 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-f84976bdf-f7s6r" Nov 25 10:06:36 crc kubenswrapper[4769]: I1125 10:06:36.561794 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5c9685d5f7-xbblq"] Nov 25 10:06:36 crc kubenswrapper[4769]: W1125 10:06:36.582902 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbbd368c8_da9f_4504_89e4_ffc922d67c32.slice/crio-537a71292e261fa8a01aac719865630f3469b59e86eb8b69b73303fe9362d09d WatchSource:0}: Error finding container 537a71292e261fa8a01aac719865630f3469b59e86eb8b69b73303fe9362d09d: Status 404 returned error can't find the container with id 537a71292e261fa8a01aac719865630f3469b59e86eb8b69b73303fe9362d09d Nov 25 10:06:36 crc kubenswrapper[4769]: I1125 10:06:36.621465 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/48fc1d3f-c20f-4949-b0d7-5b52cff1efcf-ovsdbserver-sb\") pod \"48fc1d3f-c20f-4949-b0d7-5b52cff1efcf\" (UID: \"48fc1d3f-c20f-4949-b0d7-5b52cff1efcf\") " Nov 25 10:06:36 crc kubenswrapper[4769]: I1125 10:06:36.622089 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/48fc1d3f-c20f-4949-b0d7-5b52cff1efcf-config\") pod \"48fc1d3f-c20f-4949-b0d7-5b52cff1efcf\" (UID: \"48fc1d3f-c20f-4949-b0d7-5b52cff1efcf\") " Nov 25 10:06:36 crc kubenswrapper[4769]: I1125 10:06:36.622279 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q2q6z\" (UniqueName: \"kubernetes.io/projected/48fc1d3f-c20f-4949-b0d7-5b52cff1efcf-kube-api-access-q2q6z\") pod \"48fc1d3f-c20f-4949-b0d7-5b52cff1efcf\" (UID: \"48fc1d3f-c20f-4949-b0d7-5b52cff1efcf\") " Nov 25 10:06:36 crc kubenswrapper[4769]: I1125 10:06:36.622317 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/48fc1d3f-c20f-4949-b0d7-5b52cff1efcf-ovsdbserver-nb\") pod \"48fc1d3f-c20f-4949-b0d7-5b52cff1efcf\" (UID: \"48fc1d3f-c20f-4949-b0d7-5b52cff1efcf\") " Nov 25 10:06:36 crc kubenswrapper[4769]: I1125 10:06:36.622364 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/48fc1d3f-c20f-4949-b0d7-5b52cff1efcf-dns-svc\") pod \"48fc1d3f-c20f-4949-b0d7-5b52cff1efcf\" (UID: \"48fc1d3f-c20f-4949-b0d7-5b52cff1efcf\") " Nov 25 10:06:36 crc kubenswrapper[4769]: I1125 10:06:36.646016 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fc0affe7-9d6e-4a96-90de-f8206cc38627","Type":"ContainerStarted","Data":"63477eaef06766d20bd5f9638cc991fa49165a1f3ba3609540ceaefc29b3f191"} Nov 25 10:06:36 crc kubenswrapper[4769]: I1125 10:06:36.647901 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5c9685d5f7-xbblq" event={"ID":"bbd368c8-da9f-4504-89e4-ffc922d67c32","Type":"ContainerStarted","Data":"537a71292e261fa8a01aac719865630f3469b59e86eb8b69b73303fe9362d09d"} Nov 25 10:06:36 crc kubenswrapper[4769]: I1125 10:06:36.649577 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f84976bdf-f7s6r" event={"ID":"48fc1d3f-c20f-4949-b0d7-5b52cff1efcf","Type":"ContainerDied","Data":"f1ea5214c7dd221301e4a426f0c6725f6532430e6a189586ae6d315a61b6b69d"} Nov 25 10:06:36 crc kubenswrapper[4769]: I1125 10:06:36.649749 4769 scope.go:117] 
"RemoveContainer" containerID="71f8ca34e7440c116ee32dc02c10b6feb26bc52f918a2a08a3ce6dea4b74924c" Nov 25 10:06:36 crc kubenswrapper[4769]: I1125 10:06:36.650021 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f84976bdf-f7s6r" Nov 25 10:06:36 crc kubenswrapper[4769]: I1125 10:06:36.658258 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ef5c65bd-86b7-418d-bc80-8df45db1b5a8","Type":"ContainerStarted","Data":"d91a490871ece7da51e72a6b88e953bd60b6b6e869189972281a12d961f44b73"} Nov 25 10:06:36 crc kubenswrapper[4769]: I1125 10:06:36.691361 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/48fc1d3f-c20f-4949-b0d7-5b52cff1efcf-kube-api-access-q2q6z" (OuterVolumeSpecName: "kube-api-access-q2q6z") pod "48fc1d3f-c20f-4949-b0d7-5b52cff1efcf" (UID: "48fc1d3f-c20f-4949-b0d7-5b52cff1efcf"). InnerVolumeSpecName "kube-api-access-q2q6z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:06:36 crc kubenswrapper[4769]: I1125 10:06:36.691612 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/48fc1d3f-c20f-4949-b0d7-5b52cff1efcf-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "48fc1d3f-c20f-4949-b0d7-5b52cff1efcf" (UID: "48fc1d3f-c20f-4949-b0d7-5b52cff1efcf"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:06:36 crc kubenswrapper[4769]: I1125 10:06:36.693735 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/48fc1d3f-c20f-4949-b0d7-5b52cff1efcf-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "48fc1d3f-c20f-4949-b0d7-5b52cff1efcf" (UID: "48fc1d3f-c20f-4949-b0d7-5b52cff1efcf"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:06:36 crc kubenswrapper[4769]: I1125 10:06:36.695090 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/48fc1d3f-c20f-4949-b0d7-5b52cff1efcf-config" (OuterVolumeSpecName: "config") pod "48fc1d3f-c20f-4949-b0d7-5b52cff1efcf" (UID: "48fc1d3f-c20f-4949-b0d7-5b52cff1efcf"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:06:36 crc kubenswrapper[4769]: I1125 10:06:36.698994 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/48fc1d3f-c20f-4949-b0d7-5b52cff1efcf-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "48fc1d3f-c20f-4949-b0d7-5b52cff1efcf" (UID: "48fc1d3f-c20f-4949-b0d7-5b52cff1efcf"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:06:36 crc kubenswrapper[4769]: I1125 10:06:36.725678 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q2q6z\" (UniqueName: \"kubernetes.io/projected/48fc1d3f-c20f-4949-b0d7-5b52cff1efcf-kube-api-access-q2q6z\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:36 crc kubenswrapper[4769]: I1125 10:06:36.725729 4769 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/48fc1d3f-c20f-4949-b0d7-5b52cff1efcf-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:36 crc kubenswrapper[4769]: I1125 10:06:36.725745 4769 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/48fc1d3f-c20f-4949-b0d7-5b52cff1efcf-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:36 crc kubenswrapper[4769]: I1125 10:06:36.725754 4769 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/48fc1d3f-c20f-4949-b0d7-5b52cff1efcf-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:36 crc kubenswrapper[4769]: I1125 10:06:36.725767 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/48fc1d3f-c20f-4949-b0d7-5b52cff1efcf-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:37 crc kubenswrapper[4769]: I1125 10:06:37.083628 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f84976bdf-f7s6r"] Nov 25 10:06:37 crc kubenswrapper[4769]: I1125 10:06:37.104017 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-f84976bdf-f7s6r"] Nov 25 10:06:37 crc kubenswrapper[4769]: I1125 10:06:37.679913 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ef5c65bd-86b7-418d-bc80-8df45db1b5a8","Type":"ContainerStarted","Data":"17ea494fb30ab2fb842a1d0094d13445137fc4c5cf53906984aa735164ec3a12"} Nov 25 10:06:37 crc kubenswrapper[4769]: I1125 10:06:37.680291 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="ef5c65bd-86b7-418d-bc80-8df45db1b5a8" containerName="glance-log" containerID="cri-o://d91a490871ece7da51e72a6b88e953bd60b6b6e869189972281a12d961f44b73" gracePeriod=30 Nov 25 10:06:37 crc kubenswrapper[4769]: I1125 10:06:37.681117 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="ef5c65bd-86b7-418d-bc80-8df45db1b5a8" containerName="glance-httpd" containerID="cri-o://17ea494fb30ab2fb842a1d0094d13445137fc4c5cf53906984aa735164ec3a12" gracePeriod=30 Nov 25 10:06:37 crc kubenswrapper[4769]: I1125 10:06:37.685647 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-75dfc45cc4-m9wmn" event={"ID":"3259d7a5-de25-4fa9-a5f8-36fc56dd733f","Type":"ContainerStarted","Data":"a745ec1416c3b7dff8b5bc6898f434f97b8e356d63feb9671e13d6d8296a63aa"} Nov 25 10:06:37 crc kubenswrapper[4769]: I1125 10:06:37.693763 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-75dfc45cc4-m9wmn" Nov 25 10:06:37 crc kubenswrapper[4769]: I1125 10:06:37.707951 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fb745b69-rp7dd" event={"ID":"3322c8d5-8fb0-4c40-8967-e20d2232a127","Type":"ContainerStarted","Data":"8813b534ff4b312a836881c16516d575312a5dbd84c70c6c4772b3670e7f9703"} Nov 25 10:06:37 
crc kubenswrapper[4769]: I1125 10:06:37.708252 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-fb745b69-rp7dd" Nov 25 10:06:37 crc kubenswrapper[4769]: I1125 10:06:37.734989 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=21.734946407 podStartE2EDuration="21.734946407s" podCreationTimestamp="2025-11-25 10:06:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:06:37.731505636 +0000 UTC m=+1346.316477949" watchObservedRunningTime="2025-11-25 10:06:37.734946407 +0000 UTC m=+1346.319918720" Nov 25 10:06:37 crc kubenswrapper[4769]: I1125 10:06:37.752394 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"f7987cb4-5485-438f-bc01-c69e509b81a6","Type":"ContainerStarted","Data":"1d3df3f04447e8e425bca6df53ca17f9129e625eb34f1f41541ae21f3c182f56"} Nov 25 10:06:37 crc kubenswrapper[4769]: I1125 10:06:37.752488 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"f7987cb4-5485-438f-bc01-c69e509b81a6","Type":"ContainerStarted","Data":"b39ce5fc6d41ec636bb95e65b37eb973f08bcd3c7c47e9136d57fd9d1466a3d7"} Nov 25 10:06:37 crc kubenswrapper[4769]: I1125 10:06:37.759012 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5c9685d5f7-xbblq" event={"ID":"bbd368c8-da9f-4504-89e4-ffc922d67c32","Type":"ContainerStarted","Data":"2806432c5f736ddb3d54acf37507f3fccc4d9f57c5a12e26e41d183dc33fb7d8"} Nov 25 10:06:37 crc kubenswrapper[4769]: I1125 10:06:37.759069 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5c9685d5f7-xbblq" event={"ID":"bbd368c8-da9f-4504-89e4-ffc922d67c32","Type":"ContainerStarted","Data":"ae703e608a4c4ee9518858bc855764353dc65268897b7a6127391f917bd09e0e"} Nov 25 10:06:37 crc kubenswrapper[4769]: I1125 10:06:37.760431 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-5c9685d5f7-xbblq" Nov 25 10:06:37 crc kubenswrapper[4769]: I1125 10:06:37.771944 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"8ef4c37f-e3dc-4994-92bf-b41e7c215ef1","Type":"ContainerStarted","Data":"5d597bdfbee16ad034a69638f9b446f9b14c006d7ad9dc8311808a9c1e5484aa"} Nov 25 10:06:37 crc kubenswrapper[4769]: I1125 10:06:37.772007 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"8ef4c37f-e3dc-4994-92bf-b41e7c215ef1","Type":"ContainerStarted","Data":"8801c4242ab6784a5d0a3662ecc4abee737f0716f378ff4df1371ef2926964e0"} Nov 25 10:06:37 crc kubenswrapper[4769]: I1125 10:06:37.775621 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-fb745b69-rp7dd" podStartSLOduration=6.775601101 podStartE2EDuration="6.775601101s" podCreationTimestamp="2025-11-25 10:06:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:06:37.759345447 +0000 UTC m=+1346.344317780" watchObservedRunningTime="2025-11-25 10:06:37.775601101 +0000 UTC m=+1346.360573414" Nov 25 10:06:37 crc kubenswrapper[4769]: I1125 10:06:37.792232 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-75dfc45cc4-m9wmn" podStartSLOduration=6.792190103 
podStartE2EDuration="6.792190103s" podCreationTimestamp="2025-11-25 10:06:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:06:37.78006677 +0000 UTC m=+1346.365039083" watchObservedRunningTime="2025-11-25 10:06:37.792190103 +0000 UTC m=+1346.377162416" Nov 25 10:06:37 crc kubenswrapper[4769]: I1125 10:06:37.828545 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=42.828522911 podStartE2EDuration="42.828522911s" podCreationTimestamp="2025-11-25 10:05:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:06:37.818496723 +0000 UTC m=+1346.403469036" watchObservedRunningTime="2025-11-25 10:06:37.828522911 +0000 UTC m=+1346.413495224" Nov 25 10:06:37 crc kubenswrapper[4769]: I1125 10:06:37.853594 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-5c9685d5f7-xbblq" podStartSLOduration=3.853525447 podStartE2EDuration="3.853525447s" podCreationTimestamp="2025-11-25 10:06:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:06:37.837195612 +0000 UTC m=+1346.422167925" watchObservedRunningTime="2025-11-25 10:06:37.853525447 +0000 UTC m=+1346.438497760" Nov 25 10:06:38 crc kubenswrapper[4769]: I1125 10:06:38.253710 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="48fc1d3f-c20f-4949-b0d7-5b52cff1efcf" path="/var/lib/kubelet/pods/48fc1d3f-c20f-4949-b0d7-5b52cff1efcf/volumes" Nov 25 10:06:38 crc kubenswrapper[4769]: I1125 10:06:38.786654 4769 generic.go:334] "Generic (PLEG): container finished" podID="ef5c65bd-86b7-418d-bc80-8df45db1b5a8" containerID="17ea494fb30ab2fb842a1d0094d13445137fc4c5cf53906984aa735164ec3a12" exitCode=0 Nov 25 10:06:38 crc kubenswrapper[4769]: I1125 10:06:38.787029 4769 generic.go:334] "Generic (PLEG): container finished" podID="ef5c65bd-86b7-418d-bc80-8df45db1b5a8" containerID="d91a490871ece7da51e72a6b88e953bd60b6b6e869189972281a12d961f44b73" exitCode=143 Nov 25 10:06:38 crc kubenswrapper[4769]: I1125 10:06:38.786737 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ef5c65bd-86b7-418d-bc80-8df45db1b5a8","Type":"ContainerDied","Data":"17ea494fb30ab2fb842a1d0094d13445137fc4c5cf53906984aa735164ec3a12"} Nov 25 10:06:38 crc kubenswrapper[4769]: I1125 10:06:38.787151 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ef5c65bd-86b7-418d-bc80-8df45db1b5a8","Type":"ContainerDied","Data":"d91a490871ece7da51e72a6b88e953bd60b6b6e869189972281a12d961f44b73"} Nov 25 10:06:38 crc kubenswrapper[4769]: I1125 10:06:38.787167 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ef5c65bd-86b7-418d-bc80-8df45db1b5a8","Type":"ContainerDied","Data":"491e24fa5cb83cd623bfd74b6918be7161d79d59eef54b63566f7e4075d6d1e5"} Nov 25 10:06:38 crc kubenswrapper[4769]: I1125 10:06:38.787177 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="491e24fa5cb83cd623bfd74b6918be7161d79d59eef54b63566f7e4075d6d1e5" Nov 25 10:06:38 crc kubenswrapper[4769]: I1125 10:06:38.791033 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/glance-default-internal-api-0" event={"ID":"fc0affe7-9d6e-4a96-90de-f8206cc38627","Type":"ContainerStarted","Data":"37b47c073f149e2e8f3655be80788981d706543ee8e1c3bad3ee3851ca46b4e5"} Nov 25 10:06:38 crc kubenswrapper[4769]: I1125 10:06:38.791193 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="fc0affe7-9d6e-4a96-90de-f8206cc38627" containerName="glance-log" containerID="cri-o://63477eaef06766d20bd5f9638cc991fa49165a1f3ba3609540ceaefc29b3f191" gracePeriod=30 Nov 25 10:06:38 crc kubenswrapper[4769]: I1125 10:06:38.792186 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="fc0affe7-9d6e-4a96-90de-f8206cc38627" containerName="glance-httpd" containerID="cri-o://37b47c073f149e2e8f3655be80788981d706543ee8e1c3bad3ee3851ca46b4e5" gracePeriod=30 Nov 25 10:06:38 crc kubenswrapper[4769]: I1125 10:06:38.817649 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=21.817622264 podStartE2EDuration="21.817622264s" podCreationTimestamp="2025-11-25 10:06:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:06:38.812491318 +0000 UTC m=+1347.397463631" watchObservedRunningTime="2025-11-25 10:06:38.817622264 +0000 UTC m=+1347.402594577" Nov 25 10:06:38 crc kubenswrapper[4769]: I1125 10:06:38.820011 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"f7987cb4-5485-438f-bc01-c69e509b81a6","Type":"ContainerStarted","Data":"2b6267eb7ca30af1b007962bb1e38f16149904dc94c545396db739b1e95bd9b7"} Nov 25 10:06:38 crc kubenswrapper[4769]: I1125 10:06:38.820054 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"f7987cb4-5485-438f-bc01-c69e509b81a6","Type":"ContainerStarted","Data":"8cc15e4bba03da1e888163097514981a654cda6296c7110f806b2972dfa99337"} Nov 25 10:06:38 crc kubenswrapper[4769]: I1125 10:06:38.852563 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 10:06:38 crc kubenswrapper[4769]: I1125 10:06:38.876547 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=45.656769993 podStartE2EDuration="1m38.876526124s" podCreationTimestamp="2025-11-25 10:05:00 +0000 UTC" firstStartedPulling="2025-11-25 10:05:38.475586088 +0000 UTC m=+1287.060558401" lastFinishedPulling="2025-11-25 10:06:31.695342219 +0000 UTC m=+1340.280314532" observedRunningTime="2025-11-25 10:06:38.87301757 +0000 UTC m=+1347.457989893" watchObservedRunningTime="2025-11-25 10:06:38.876526124 +0000 UTC m=+1347.461498437" Nov 25 10:06:38 crc kubenswrapper[4769]: I1125 10:06:38.896439 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef5c65bd-86b7-418d-bc80-8df45db1b5a8-combined-ca-bundle\") pod \"ef5c65bd-86b7-418d-bc80-8df45db1b5a8\" (UID: \"ef5c65bd-86b7-418d-bc80-8df45db1b5a8\") " Nov 25 10:06:38 crc kubenswrapper[4769]: I1125 10:06:38.896545 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zm29f\" (UniqueName: \"kubernetes.io/projected/ef5c65bd-86b7-418d-bc80-8df45db1b5a8-kube-api-access-zm29f\") pod \"ef5c65bd-86b7-418d-bc80-8df45db1b5a8\" (UID: \"ef5c65bd-86b7-418d-bc80-8df45db1b5a8\") " Nov 25 10:06:38 crc kubenswrapper[4769]: I1125 10:06:38.896588 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ef5c65bd-86b7-418d-bc80-8df45db1b5a8-config-data\") pod \"ef5c65bd-86b7-418d-bc80-8df45db1b5a8\" (UID: \"ef5c65bd-86b7-418d-bc80-8df45db1b5a8\") " Nov 25 10:06:38 crc kubenswrapper[4769]: I1125 10:06:38.896618 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ef5c65bd-86b7-418d-bc80-8df45db1b5a8-scripts\") pod \"ef5c65bd-86b7-418d-bc80-8df45db1b5a8\" (UID: \"ef5c65bd-86b7-418d-bc80-8df45db1b5a8\") " Nov 25 10:06:38 crc kubenswrapper[4769]: I1125 10:06:38.896668 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ef5c65bd-86b7-418d-bc80-8df45db1b5a8-httpd-run\") pod \"ef5c65bd-86b7-418d-bc80-8df45db1b5a8\" (UID: \"ef5c65bd-86b7-418d-bc80-8df45db1b5a8\") " Nov 25 10:06:38 crc kubenswrapper[4769]: I1125 10:06:38.896775 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ef5c65bd-86b7-418d-bc80-8df45db1b5a8\" (UID: \"ef5c65bd-86b7-418d-bc80-8df45db1b5a8\") " Nov 25 10:06:38 crc kubenswrapper[4769]: I1125 10:06:38.896834 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ef5c65bd-86b7-418d-bc80-8df45db1b5a8-logs\") pod \"ef5c65bd-86b7-418d-bc80-8df45db1b5a8\" (UID: \"ef5c65bd-86b7-418d-bc80-8df45db1b5a8\") " Nov 25 10:06:38 crc kubenswrapper[4769]: I1125 10:06:38.900325 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ef5c65bd-86b7-418d-bc80-8df45db1b5a8-logs" (OuterVolumeSpecName: "logs") pod "ef5c65bd-86b7-418d-bc80-8df45db1b5a8" (UID: "ef5c65bd-86b7-418d-bc80-8df45db1b5a8"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:06:38 crc kubenswrapper[4769]: I1125 10:06:38.900759 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ef5c65bd-86b7-418d-bc80-8df45db1b5a8-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "ef5c65bd-86b7-418d-bc80-8df45db1b5a8" (UID: "ef5c65bd-86b7-418d-bc80-8df45db1b5a8"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:06:38 crc kubenswrapper[4769]: I1125 10:06:38.917172 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef5c65bd-86b7-418d-bc80-8df45db1b5a8-scripts" (OuterVolumeSpecName: "scripts") pod "ef5c65bd-86b7-418d-bc80-8df45db1b5a8" (UID: "ef5c65bd-86b7-418d-bc80-8df45db1b5a8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:06:38 crc kubenswrapper[4769]: I1125 10:06:38.917184 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "glance") pod "ef5c65bd-86b7-418d-bc80-8df45db1b5a8" (UID: "ef5c65bd-86b7-418d-bc80-8df45db1b5a8"). InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 10:06:38 crc kubenswrapper[4769]: I1125 10:06:38.917318 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ef5c65bd-86b7-418d-bc80-8df45db1b5a8-kube-api-access-zm29f" (OuterVolumeSpecName: "kube-api-access-zm29f") pod "ef5c65bd-86b7-418d-bc80-8df45db1b5a8" (UID: "ef5c65bd-86b7-418d-bc80-8df45db1b5a8"). InnerVolumeSpecName "kube-api-access-zm29f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.000944 4769 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ef5c65bd-86b7-418d-bc80-8df45db1b5a8-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.001012 4769 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ef5c65bd-86b7-418d-bc80-8df45db1b5a8-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.001056 4769 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" " Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.001067 4769 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ef5c65bd-86b7-418d-bc80-8df45db1b5a8-logs\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.001077 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zm29f\" (UniqueName: \"kubernetes.io/projected/ef5c65bd-86b7-418d-bc80-8df45db1b5a8-kube-api-access-zm29f\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.019220 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef5c65bd-86b7-418d-bc80-8df45db1b5a8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ef5c65bd-86b7-418d-bc80-8df45db1b5a8" (UID: "ef5c65bd-86b7-418d-bc80-8df45db1b5a8"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.053479 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef5c65bd-86b7-418d-bc80-8df45db1b5a8-config-data" (OuterVolumeSpecName: "config-data") pod "ef5c65bd-86b7-418d-bc80-8df45db1b5a8" (UID: "ef5c65bd-86b7-418d-bc80-8df45db1b5a8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.056503 4769 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc" Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.103855 4769 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.103903 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef5c65bd-86b7-418d-bc80-8df45db1b5a8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.103914 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ef5c65bd-86b7-418d-bc80-8df45db1b5a8-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.185976 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-fb745b69-rp7dd"] Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.246979 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-84b966f6c9-nzkfd"] Nov 25 10:06:39 crc kubenswrapper[4769]: E1125 10:06:39.247792 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef5c65bd-86b7-418d-bc80-8df45db1b5a8" containerName="glance-httpd" Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.247823 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef5c65bd-86b7-418d-bc80-8df45db1b5a8" containerName="glance-httpd" Nov 25 10:06:39 crc kubenswrapper[4769]: E1125 10:06:39.247871 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="48fc1d3f-c20f-4949-b0d7-5b52cff1efcf" containerName="init" Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.247880 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="48fc1d3f-c20f-4949-b0d7-5b52cff1efcf" containerName="init" Nov 25 10:06:39 crc kubenswrapper[4769]: E1125 10:06:39.247905 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef5c65bd-86b7-418d-bc80-8df45db1b5a8" containerName="glance-log" Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.247914 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef5c65bd-86b7-418d-bc80-8df45db1b5a8" containerName="glance-log" Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.248406 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef5c65bd-86b7-418d-bc80-8df45db1b5a8" containerName="glance-log" Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.248440 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="48fc1d3f-c20f-4949-b0d7-5b52cff1efcf" containerName="init" Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.248461 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef5c65bd-86b7-418d-bc80-8df45db1b5a8" containerName="glance-httpd" 
Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.250224 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84b966f6c9-nzkfd"
Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.255102 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0"
Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.265947 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-84b966f6c9-nzkfd"]
Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.314489 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/938480bb-3a3b-4131-8fd0-ba63a9b2755a-ovsdbserver-sb\") pod \"dnsmasq-dns-84b966f6c9-nzkfd\" (UID: \"938480bb-3a3b-4131-8fd0-ba63a9b2755a\") " pod="openstack/dnsmasq-dns-84b966f6c9-nzkfd"
Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.314566 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rmq88\" (UniqueName: \"kubernetes.io/projected/938480bb-3a3b-4131-8fd0-ba63a9b2755a-kube-api-access-rmq88\") pod \"dnsmasq-dns-84b966f6c9-nzkfd\" (UID: \"938480bb-3a3b-4131-8fd0-ba63a9b2755a\") " pod="openstack/dnsmasq-dns-84b966f6c9-nzkfd"
Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.314602 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/938480bb-3a3b-4131-8fd0-ba63a9b2755a-ovsdbserver-nb\") pod \"dnsmasq-dns-84b966f6c9-nzkfd\" (UID: \"938480bb-3a3b-4131-8fd0-ba63a9b2755a\") " pod="openstack/dnsmasq-dns-84b966f6c9-nzkfd"
Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.314650 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/938480bb-3a3b-4131-8fd0-ba63a9b2755a-dns-swift-storage-0\") pod \"dnsmasq-dns-84b966f6c9-nzkfd\" (UID: \"938480bb-3a3b-4131-8fd0-ba63a9b2755a\") " pod="openstack/dnsmasq-dns-84b966f6c9-nzkfd"
Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.314676 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/938480bb-3a3b-4131-8fd0-ba63a9b2755a-config\") pod \"dnsmasq-dns-84b966f6c9-nzkfd\" (UID: \"938480bb-3a3b-4131-8fd0-ba63a9b2755a\") " pod="openstack/dnsmasq-dns-84b966f6c9-nzkfd"
Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.314710 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/938480bb-3a3b-4131-8fd0-ba63a9b2755a-dns-svc\") pod \"dnsmasq-dns-84b966f6c9-nzkfd\" (UID: \"938480bb-3a3b-4131-8fd0-ba63a9b2755a\") " pod="openstack/dnsmasq-dns-84b966f6c9-nzkfd"
Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.416545 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rmq88\" (UniqueName: \"kubernetes.io/projected/938480bb-3a3b-4131-8fd0-ba63a9b2755a-kube-api-access-rmq88\") pod \"dnsmasq-dns-84b966f6c9-nzkfd\" (UID: \"938480bb-3a3b-4131-8fd0-ba63a9b2755a\") " pod="openstack/dnsmasq-dns-84b966f6c9-nzkfd"
Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.416620 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/938480bb-3a3b-4131-8fd0-ba63a9b2755a-ovsdbserver-nb\") pod \"dnsmasq-dns-84b966f6c9-nzkfd\" (UID: \"938480bb-3a3b-4131-8fd0-ba63a9b2755a\") " pod="openstack/dnsmasq-dns-84b966f6c9-nzkfd"
Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.416683 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/938480bb-3a3b-4131-8fd0-ba63a9b2755a-dns-swift-storage-0\") pod \"dnsmasq-dns-84b966f6c9-nzkfd\" (UID: \"938480bb-3a3b-4131-8fd0-ba63a9b2755a\") " pod="openstack/dnsmasq-dns-84b966f6c9-nzkfd"
Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.416713 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/938480bb-3a3b-4131-8fd0-ba63a9b2755a-config\") pod \"dnsmasq-dns-84b966f6c9-nzkfd\" (UID: \"938480bb-3a3b-4131-8fd0-ba63a9b2755a\") " pod="openstack/dnsmasq-dns-84b966f6c9-nzkfd"
Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.416747 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/938480bb-3a3b-4131-8fd0-ba63a9b2755a-dns-svc\") pod \"dnsmasq-dns-84b966f6c9-nzkfd\" (UID: \"938480bb-3a3b-4131-8fd0-ba63a9b2755a\") " pod="openstack/dnsmasq-dns-84b966f6c9-nzkfd"
Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.417018 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/938480bb-3a3b-4131-8fd0-ba63a9b2755a-ovsdbserver-sb\") pod \"dnsmasq-dns-84b966f6c9-nzkfd\" (UID: \"938480bb-3a3b-4131-8fd0-ba63a9b2755a\") " pod="openstack/dnsmasq-dns-84b966f6c9-nzkfd"
Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.420016 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/938480bb-3a3b-4131-8fd0-ba63a9b2755a-dns-swift-storage-0\") pod \"dnsmasq-dns-84b966f6c9-nzkfd\" (UID: \"938480bb-3a3b-4131-8fd0-ba63a9b2755a\") " pod="openstack/dnsmasq-dns-84b966f6c9-nzkfd"
Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.421318 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/938480bb-3a3b-4131-8fd0-ba63a9b2755a-dns-svc\") pod \"dnsmasq-dns-84b966f6c9-nzkfd\" (UID: \"938480bb-3a3b-4131-8fd0-ba63a9b2755a\") " pod="openstack/dnsmasq-dns-84b966f6c9-nzkfd"
Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.422026 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/938480bb-3a3b-4131-8fd0-ba63a9b2755a-ovsdbserver-nb\") pod \"dnsmasq-dns-84b966f6c9-nzkfd\" (UID: \"938480bb-3a3b-4131-8fd0-ba63a9b2755a\") " pod="openstack/dnsmasq-dns-84b966f6c9-nzkfd"
Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.422162 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/938480bb-3a3b-4131-8fd0-ba63a9b2755a-ovsdbserver-sb\") pod \"dnsmasq-dns-84b966f6c9-nzkfd\" (UID: \"938480bb-3a3b-4131-8fd0-ba63a9b2755a\") " pod="openstack/dnsmasq-dns-84b966f6c9-nzkfd"
Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.434090 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/938480bb-3a3b-4131-8fd0-ba63a9b2755a-config\") pod \"dnsmasq-dns-84b966f6c9-nzkfd\" (UID: \"938480bb-3a3b-4131-8fd0-ba63a9b2755a\") " pod="openstack/dnsmasq-dns-84b966f6c9-nzkfd"
Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.445216 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rmq88\" (UniqueName: \"kubernetes.io/projected/938480bb-3a3b-4131-8fd0-ba63a9b2755a-kube-api-access-rmq88\") pod \"dnsmasq-dns-84b966f6c9-nzkfd\" (UID: \"938480bb-3a3b-4131-8fd0-ba63a9b2755a\") " pod="openstack/dnsmasq-dns-84b966f6c9-nzkfd"
Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.591548 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84b966f6c9-nzkfd"
Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.834577 4769 generic.go:334] "Generic (PLEG): container finished" podID="fc0affe7-9d6e-4a96-90de-f8206cc38627" containerID="37b47c073f149e2e8f3655be80788981d706543ee8e1c3bad3ee3851ca46b4e5" exitCode=0
Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.834679 4769 generic.go:334] "Generic (PLEG): container finished" podID="fc0affe7-9d6e-4a96-90de-f8206cc38627" containerID="63477eaef06766d20bd5f9638cc991fa49165a1f3ba3609540ceaefc29b3f191" exitCode=143
Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.835824 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fc0affe7-9d6e-4a96-90de-f8206cc38627","Type":"ContainerDied","Data":"37b47c073f149e2e8f3655be80788981d706543ee8e1c3bad3ee3851ca46b4e5"}
Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.835858 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fc0affe7-9d6e-4a96-90de-f8206cc38627","Type":"ContainerDied","Data":"63477eaef06766d20bd5f9638cc991fa49165a1f3ba3609540ceaefc29b3f191"}
Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.835909 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.836303 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-fb745b69-rp7dd" podUID="3322c8d5-8fb0-4c40-8967-e20d2232a127" containerName="dnsmasq-dns" containerID="cri-o://8813b534ff4b312a836881c16516d575312a5dbd84c70c6c4772b3670e7f9703" gracePeriod=10
Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.951090 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.967456 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.987705 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.990485 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 25 10:06:39 crc kubenswrapper[4769]: I1125 10:06:39.998308 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 25 10:06:40 crc kubenswrapper[4769]: I1125 10:06:40.003547 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data"
Nov 25 10:06:40 crc kubenswrapper[4769]: I1125 10:06:40.004307 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc"
Nov 25 10:06:40 crc kubenswrapper[4769]: I1125 10:06:40.137558 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"4ea324d5-69e1-4f0b-982f-15b0de3ec539\") " pod="openstack/glance-default-external-api-0"
Nov 25 10:06:40 crc kubenswrapper[4769]: I1125 10:06:40.137828 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ea324d5-69e1-4f0b-982f-15b0de3ec539-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"4ea324d5-69e1-4f0b-982f-15b0de3ec539\") " pod="openstack/glance-default-external-api-0"
Nov 25 10:06:40 crc kubenswrapper[4769]: I1125 10:06:40.137992 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4ea324d5-69e1-4f0b-982f-15b0de3ec539-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"4ea324d5-69e1-4f0b-982f-15b0de3ec539\") " pod="openstack/glance-default-external-api-0"
Nov 25 10:06:40 crc kubenswrapper[4769]: I1125 10:06:40.138094 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ea324d5-69e1-4f0b-982f-15b0de3ec539-config-data\") pod \"glance-default-external-api-0\" (UID: \"4ea324d5-69e1-4f0b-982f-15b0de3ec539\") " pod="openstack/glance-default-external-api-0"
Nov 25 10:06:40 crc kubenswrapper[4769]: I1125 10:06:40.138219 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4ea324d5-69e1-4f0b-982f-15b0de3ec539-scripts\") pod \"glance-default-external-api-0\" (UID: \"4ea324d5-69e1-4f0b-982f-15b0de3ec539\") " pod="openstack/glance-default-external-api-0"
Nov 25 10:06:40 crc kubenswrapper[4769]: I1125 10:06:40.138387 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4ea324d5-69e1-4f0b-982f-15b0de3ec539-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"4ea324d5-69e1-4f0b-982f-15b0de3ec539\") " pod="openstack/glance-default-external-api-0"
Nov 25 10:06:40 crc kubenswrapper[4769]: I1125 10:06:40.138500 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4ea324d5-69e1-4f0b-982f-15b0de3ec539-logs\") pod \"glance-default-external-api-0\" (UID: \"4ea324d5-69e1-4f0b-982f-15b0de3ec539\") " pod="openstack/glance-default-external-api-0"
Nov 25 10:06:40 crc kubenswrapper[4769]: I1125 10:06:40.138632 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c92jq\" (UniqueName: \"kubernetes.io/projected/4ea324d5-69e1-4f0b-982f-15b0de3ec539-kube-api-access-c92jq\") pod \"glance-default-external-api-0\" (UID: \"4ea324d5-69e1-4f0b-982f-15b0de3ec539\") " pod="openstack/glance-default-external-api-0"
Nov 25 10:06:40 crc kubenswrapper[4769]: I1125 10:06:40.240042 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ea324d5-69e1-4f0b-982f-15b0de3ec539-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"4ea324d5-69e1-4f0b-982f-15b0de3ec539\") " pod="openstack/glance-default-external-api-0"
Nov 25 10:06:40 crc kubenswrapper[4769]: I1125 10:06:40.240396 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4ea324d5-69e1-4f0b-982f-15b0de3ec539-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"4ea324d5-69e1-4f0b-982f-15b0de3ec539\") " pod="openstack/glance-default-external-api-0"
Nov 25 10:06:40 crc kubenswrapper[4769]: I1125 10:06:40.240503 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ea324d5-69e1-4f0b-982f-15b0de3ec539-config-data\") pod \"glance-default-external-api-0\" (UID: \"4ea324d5-69e1-4f0b-982f-15b0de3ec539\") " pod="openstack/glance-default-external-api-0"
Nov 25 10:06:40 crc kubenswrapper[4769]: I1125 10:06:40.240599 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4ea324d5-69e1-4f0b-982f-15b0de3ec539-scripts\") pod \"glance-default-external-api-0\" (UID: \"4ea324d5-69e1-4f0b-982f-15b0de3ec539\") " pod="openstack/glance-default-external-api-0"
Nov 25 10:06:40 crc kubenswrapper[4769]: I1125 10:06:40.240747 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4ea324d5-69e1-4f0b-982f-15b0de3ec539-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"4ea324d5-69e1-4f0b-982f-15b0de3ec539\") " pod="openstack/glance-default-external-api-0"
Nov 25 10:06:40 crc kubenswrapper[4769]: I1125 10:06:40.240829 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4ea324d5-69e1-4f0b-982f-15b0de3ec539-logs\") pod \"glance-default-external-api-0\" (UID: \"4ea324d5-69e1-4f0b-982f-15b0de3ec539\") " pod="openstack/glance-default-external-api-0"
Nov 25 10:06:40 crc kubenswrapper[4769]: I1125 10:06:40.240917 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c92jq\" (UniqueName: \"kubernetes.io/projected/4ea324d5-69e1-4f0b-982f-15b0de3ec539-kube-api-access-c92jq\") pod \"glance-default-external-api-0\" (UID: \"4ea324d5-69e1-4f0b-982f-15b0de3ec539\") " pod="openstack/glance-default-external-api-0"
Nov 25 10:06:40 crc kubenswrapper[4769]: I1125 10:06:40.240955 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4ea324d5-69e1-4f0b-982f-15b0de3ec539-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"4ea324d5-69e1-4f0b-982f-15b0de3ec539\") " pod="openstack/glance-default-external-api-0"
Nov 25 10:06:40 crc kubenswrapper[4769]: I1125 10:06:40.241104 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"4ea324d5-69e1-4f0b-982f-15b0de3ec539\") " pod="openstack/glance-default-external-api-0"
Nov 25 10:06:40 crc kubenswrapper[4769]: I1125 10:06:40.241411 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4ea324d5-69e1-4f0b-982f-15b0de3ec539-logs\") pod \"glance-default-external-api-0\" (UID: \"4ea324d5-69e1-4f0b-982f-15b0de3ec539\") " pod="openstack/glance-default-external-api-0"
Nov 25 10:06:40 crc kubenswrapper[4769]: I1125 10:06:40.241523 4769 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"4ea324d5-69e1-4f0b-982f-15b0de3ec539\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/glance-default-external-api-0"
Nov 25 10:06:40 crc kubenswrapper[4769]: I1125 10:06:40.247808 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4ea324d5-69e1-4f0b-982f-15b0de3ec539-scripts\") pod \"glance-default-external-api-0\" (UID: \"4ea324d5-69e1-4f0b-982f-15b0de3ec539\") " pod="openstack/glance-default-external-api-0"
Nov 25 10:06:40 crc kubenswrapper[4769]: I1125 10:06:40.248061 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ea324d5-69e1-4f0b-982f-15b0de3ec539-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"4ea324d5-69e1-4f0b-982f-15b0de3ec539\") " pod="openstack/glance-default-external-api-0"
Nov 25 10:06:40 crc kubenswrapper[4769]: I1125 10:06:40.249387 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ea324d5-69e1-4f0b-982f-15b0de3ec539-config-data\") pod \"glance-default-external-api-0\" (UID: \"4ea324d5-69e1-4f0b-982f-15b0de3ec539\") " pod="openstack/glance-default-external-api-0"
Nov 25 10:06:40 crc kubenswrapper[4769]: I1125 10:06:40.254441 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ef5c65bd-86b7-418d-bc80-8df45db1b5a8" path="/var/lib/kubelet/pods/ef5c65bd-86b7-418d-bc80-8df45db1b5a8/volumes"
Nov 25 10:06:40 crc kubenswrapper[4769]: I1125 10:06:40.257614 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4ea324d5-69e1-4f0b-982f-15b0de3ec539-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"4ea324d5-69e1-4f0b-982f-15b0de3ec539\") " pod="openstack/glance-default-external-api-0"
Nov 25 10:06:40 crc kubenswrapper[4769]: I1125 10:06:40.265635 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c92jq\" (UniqueName: \"kubernetes.io/projected/4ea324d5-69e1-4f0b-982f-15b0de3ec539-kube-api-access-c92jq\") pod \"glance-default-external-api-0\" (UID: \"4ea324d5-69e1-4f0b-982f-15b0de3ec539\") " pod="openstack/glance-default-external-api-0"
Nov 25 10:06:40 crc kubenswrapper[4769]: I1125 10:06:40.350761 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"4ea324d5-69e1-4f0b-982f-15b0de3ec539\") " pod="openstack/glance-default-external-api-0"
Nov 25 10:06:40 crc kubenswrapper[4769]: I1125 10:06:40.635790 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 25 10:06:40 crc kubenswrapper[4769]: I1125 10:06:40.850856 4769 generic.go:334] "Generic (PLEG): container finished" podID="29edc9fc-ffe1-4511-9f89-9d0795f063cb" containerID="09a8800fabbe0b8cff7aa77a649828ff9cb1df93447412766ac0f90f2cee23c2" exitCode=0
Nov 25 10:06:40 crc kubenswrapper[4769]: I1125 10:06:40.850943 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-9zbdp" event={"ID":"29edc9fc-ffe1-4511-9f89-9d0795f063cb","Type":"ContainerDied","Data":"09a8800fabbe0b8cff7aa77a649828ff9cb1df93447412766ac0f90f2cee23c2"}
Nov 25 10:06:40 crc kubenswrapper[4769]: I1125 10:06:40.855238 4769 generic.go:334] "Generic (PLEG): container finished" podID="3322c8d5-8fb0-4c40-8967-e20d2232a127" containerID="8813b534ff4b312a836881c16516d575312a5dbd84c70c6c4772b3670e7f9703" exitCode=0
Nov 25 10:06:40 crc kubenswrapper[4769]: I1125 10:06:40.855300 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fb745b69-rp7dd" event={"ID":"3322c8d5-8fb0-4c40-8967-e20d2232a127","Type":"ContainerDied","Data":"8813b534ff4b312a836881c16516d575312a5dbd84c70c6c4772b3670e7f9703"}
Nov 25 10:06:40 crc kubenswrapper[4769]: I1125 10:06:40.930916 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0"
Nov 25 10:06:40 crc kubenswrapper[4769]: I1125 10:06:40.931314 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0"
Nov 25 10:06:40 crc kubenswrapper[4769]: I1125 10:06:40.937546 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0"
Nov 25 10:06:41 crc kubenswrapper[4769]: I1125 10:06:41.880201 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0"
Nov 25 10:06:41 crc kubenswrapper[4769]: I1125 10:06:41.897678 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-fb745b69-rp7dd" podUID="3322c8d5-8fb0-4c40-8967-e20d2232a127" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.189:5353: connect: connection refused"
Nov 25 10:06:42 crc kubenswrapper[4769]: I1125 10:06:42.895677 4769 generic.go:334] "Generic (PLEG): container finished" podID="ee29c33a-80a8-4f26-81bd-1af50bbbaabb" containerID="c7751374f75eda43b05b2eb51be43fe0c15ca757e586d1a21f51229c42e0f892" exitCode=0
Nov 25 10:06:42 crc kubenswrapper[4769]: I1125 10:06:42.896150 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-qkr9g" event={"ID":"ee29c33a-80a8-4f26-81bd-1af50bbbaabb","Type":"ContainerDied","Data":"c7751374f75eda43b05b2eb51be43fe0c15ca757e586d1a21f51229c42e0f892"}
Nov 25 10:06:42 crc kubenswrapper[4769]: I1125 10:06:42.918980 4769 generic.go:334] "Generic (PLEG): container finished" podID="175271fe-6677-49b5-b497-c45ef1816fb7" containerID="978d649392af26e7b658de6ba64a81ab9a59b5f0291633e52cc1e43cea2315f7" exitCode=0
Nov 25 10:06:42 crc kubenswrapper[4769]: I1125 10:06:42.920145 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-pvsgh" event={"ID":"175271fe-6677-49b5-b497-c45ef1816fb7","Type":"ContainerDied","Data":"978d649392af26e7b658de6ba64a81ab9a59b5f0291633e52cc1e43cea2315f7"}
Nov 25 10:06:44 crc kubenswrapper[4769]: I1125 10:06:44.933903 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-qkr9g"
Nov 25 10:06:44 crc kubenswrapper[4769]: I1125 10:06:44.951501 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-pvsgh" event={"ID":"175271fe-6677-49b5-b497-c45ef1816fb7","Type":"ContainerDied","Data":"34dbacde582f659e8d43b96db337d5e219af871257cf42e6deb6b54de031e4af"}
Nov 25 10:06:44 crc kubenswrapper[4769]: I1125 10:06:44.951535 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="34dbacde582f659e8d43b96db337d5e219af871257cf42e6deb6b54de031e4af"
Nov 25 10:06:44 crc kubenswrapper[4769]: I1125 10:06:44.953626 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-9zbdp" event={"ID":"29edc9fc-ffe1-4511-9f89-9d0795f063cb","Type":"ContainerDied","Data":"2018496b59b712e564fb882795b5a8cb7aef6ee9dae4dc65365e6bde2e1c3559"}
Nov 25 10:06:44 crc kubenswrapper[4769]: I1125 10:06:44.953671 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2018496b59b712e564fb882795b5a8cb7aef6ee9dae4dc65365e6bde2e1c3559"
Nov 25 10:06:44 crc kubenswrapper[4769]: I1125 10:06:44.953688 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-9zbdp"
Nov 25 10:06:44 crc kubenswrapper[4769]: I1125 10:06:44.964388 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-qkr9g" event={"ID":"ee29c33a-80a8-4f26-81bd-1af50bbbaabb","Type":"ContainerDied","Data":"5458feb12a34eecf34938bcf7724589bc9dfd9b423c4424680d314eba27ecf92"}
Nov 25 10:06:44 crc kubenswrapper[4769]: I1125 10:06:44.964422 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5458feb12a34eecf34938bcf7724589bc9dfd9b423c4424680d314eba27ecf92"
Nov 25 10:06:44 crc kubenswrapper[4769]: I1125 10:06:44.964482 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-qkr9g"
Nov 25 10:06:44 crc kubenswrapper[4769]: I1125 10:06:44.977148 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 25 10:06:44 crc kubenswrapper[4769]: I1125 10:06:44.980660 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fc0affe7-9d6e-4a96-90de-f8206cc38627","Type":"ContainerDied","Data":"e923c8eb4c22a2913ee61530bea25d09a29dc94f6de8497d20efd3e29e56e0fb"}
Nov 25 10:06:44 crc kubenswrapper[4769]: I1125 10:06:44.980717 4769 scope.go:117] "RemoveContainer" containerID="37b47c073f149e2e8f3655be80788981d706543ee8e1c3bad3ee3851ca46b4e5"
Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.029147 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-pvsgh"
Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.072572 4769 scope.go:117] "RemoveContainer" containerID="63477eaef06766d20bd5f9638cc991fa49165a1f3ba3609540ceaefc29b3f191"
Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.079999 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fc0affe7-9d6e-4a96-90de-f8206cc38627-httpd-run\") pod \"fc0affe7-9d6e-4a96-90de-f8206cc38627\" (UID: \"fc0affe7-9d6e-4a96-90de-f8206cc38627\") "
Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.080059 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fc0affe7-9d6e-4a96-90de-f8206cc38627-scripts\") pod \"fc0affe7-9d6e-4a96-90de-f8206cc38627\" (UID: \"fc0affe7-9d6e-4a96-90de-f8206cc38627\") "
Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.080097 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"fc0affe7-9d6e-4a96-90de-f8206cc38627\" (UID: \"fc0affe7-9d6e-4a96-90de-f8206cc38627\") "
Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.080136 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ee29c33a-80a8-4f26-81bd-1af50bbbaabb-credential-keys\") pod \"ee29c33a-80a8-4f26-81bd-1af50bbbaabb\" (UID: \"ee29c33a-80a8-4f26-81bd-1af50bbbaabb\") "
Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.080165 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ee29c33a-80a8-4f26-81bd-1af50bbbaabb-scripts\") pod \"ee29c33a-80a8-4f26-81bd-1af50bbbaabb\" (UID: \"ee29c33a-80a8-4f26-81bd-1af50bbbaabb\") "
Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.080197 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fc0affe7-9d6e-4a96-90de-f8206cc38627-config-data\") pod \"fc0affe7-9d6e-4a96-90de-f8206cc38627\" (UID: \"fc0affe7-9d6e-4a96-90de-f8206cc38627\") "
Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.080221 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc0affe7-9d6e-4a96-90de-f8206cc38627-combined-ca-bundle\") pod \"fc0affe7-9d6e-4a96-90de-f8206cc38627\" (UID: \"fc0affe7-9d6e-4a96-90de-f8206cc38627\") "
Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.080244 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dg2tm\" (UniqueName: \"kubernetes.io/projected/ee29c33a-80a8-4f26-81bd-1af50bbbaabb-kube-api-access-dg2tm\") pod \"ee29c33a-80a8-4f26-81bd-1af50bbbaabb\" (UID: \"ee29c33a-80a8-4f26-81bd-1af50bbbaabb\") "
Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.080275 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7xgj5\" (UniqueName: \"kubernetes.io/projected/fc0affe7-9d6e-4a96-90de-f8206cc38627-kube-api-access-7xgj5\") pod \"fc0affe7-9d6e-4a96-90de-f8206cc38627\" (UID: \"fc0affe7-9d6e-4a96-90de-f8206cc38627\") "
Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.080315 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName:
\"kubernetes.io/secret/29edc9fc-ffe1-4511-9f89-9d0795f063cb-config-data\") pod \"29edc9fc-ffe1-4511-9f89-9d0795f063cb\" (UID: \"29edc9fc-ffe1-4511-9f89-9d0795f063cb\") " Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.080347 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vvk5z\" (UniqueName: \"kubernetes.io/projected/29edc9fc-ffe1-4511-9f89-9d0795f063cb-kube-api-access-vvk5z\") pod \"29edc9fc-ffe1-4511-9f89-9d0795f063cb\" (UID: \"29edc9fc-ffe1-4511-9f89-9d0795f063cb\") " Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.080401 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29edc9fc-ffe1-4511-9f89-9d0795f063cb-combined-ca-bundle\") pod \"29edc9fc-ffe1-4511-9f89-9d0795f063cb\" (UID: \"29edc9fc-ffe1-4511-9f89-9d0795f063cb\") " Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.080427 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee29c33a-80a8-4f26-81bd-1af50bbbaabb-config-data\") pod \"ee29c33a-80a8-4f26-81bd-1af50bbbaabb\" (UID: \"ee29c33a-80a8-4f26-81bd-1af50bbbaabb\") " Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.080507 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee29c33a-80a8-4f26-81bd-1af50bbbaabb-combined-ca-bundle\") pod \"ee29c33a-80a8-4f26-81bd-1af50bbbaabb\" (UID: \"ee29c33a-80a8-4f26-81bd-1af50bbbaabb\") " Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.080521 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fc0affe7-9d6e-4a96-90de-f8206cc38627-logs\") pod \"fc0affe7-9d6e-4a96-90de-f8206cc38627\" (UID: \"fc0affe7-9d6e-4a96-90de-f8206cc38627\") " Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.080555 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ee29c33a-80a8-4f26-81bd-1af50bbbaabb-fernet-keys\") pod \"ee29c33a-80a8-4f26-81bd-1af50bbbaabb\" (UID: \"ee29c33a-80a8-4f26-81bd-1af50bbbaabb\") " Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.080575 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/29edc9fc-ffe1-4511-9f89-9d0795f063cb-scripts\") pod \"29edc9fc-ffe1-4511-9f89-9d0795f063cb\" (UID: \"29edc9fc-ffe1-4511-9f89-9d0795f063cb\") " Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.080601 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/29edc9fc-ffe1-4511-9f89-9d0795f063cb-logs\") pod \"29edc9fc-ffe1-4511-9f89-9d0795f063cb\" (UID: \"29edc9fc-ffe1-4511-9f89-9d0795f063cb\") " Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.081662 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/29edc9fc-ffe1-4511-9f89-9d0795f063cb-logs" (OuterVolumeSpecName: "logs") pod "29edc9fc-ffe1-4511-9f89-9d0795f063cb" (UID: "29edc9fc-ffe1-4511-9f89-9d0795f063cb"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.082605 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fc0affe7-9d6e-4a96-90de-f8206cc38627-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "fc0affe7-9d6e-4a96-90de-f8206cc38627" (UID: "fc0affe7-9d6e-4a96-90de-f8206cc38627"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.084369 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fc0affe7-9d6e-4a96-90de-f8206cc38627-logs" (OuterVolumeSpecName: "logs") pod "fc0affe7-9d6e-4a96-90de-f8206cc38627" (UID: "fc0affe7-9d6e-4a96-90de-f8206cc38627"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.110041 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee29c33a-80a8-4f26-81bd-1af50bbbaabb-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "ee29c33a-80a8-4f26-81bd-1af50bbbaabb" (UID: "ee29c33a-80a8-4f26-81bd-1af50bbbaabb"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.110626 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fc0affe7-9d6e-4a96-90de-f8206cc38627-kube-api-access-7xgj5" (OuterVolumeSpecName: "kube-api-access-7xgj5") pod "fc0affe7-9d6e-4a96-90de-f8206cc38627" (UID: "fc0affe7-9d6e-4a96-90de-f8206cc38627"). InnerVolumeSpecName "kube-api-access-7xgj5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.111210 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "glance") pod "fc0affe7-9d6e-4a96-90de-f8206cc38627" (UID: "fc0affe7-9d6e-4a96-90de-f8206cc38627"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.111234 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee29c33a-80a8-4f26-81bd-1af50bbbaabb-scripts" (OuterVolumeSpecName: "scripts") pod "ee29c33a-80a8-4f26-81bd-1af50bbbaabb" (UID: "ee29c33a-80a8-4f26-81bd-1af50bbbaabb"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.113043 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc0affe7-9d6e-4a96-90de-f8206cc38627-scripts" (OuterVolumeSpecName: "scripts") pod "fc0affe7-9d6e-4a96-90de-f8206cc38627" (UID: "fc0affe7-9d6e-4a96-90de-f8206cc38627"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.116185 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29edc9fc-ffe1-4511-9f89-9d0795f063cb-kube-api-access-vvk5z" (OuterVolumeSpecName: "kube-api-access-vvk5z") pod "29edc9fc-ffe1-4511-9f89-9d0795f063cb" (UID: "29edc9fc-ffe1-4511-9f89-9d0795f063cb"). InnerVolumeSpecName "kube-api-access-vvk5z". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.116407 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29edc9fc-ffe1-4511-9f89-9d0795f063cb-scripts" (OuterVolumeSpecName: "scripts") pod "29edc9fc-ffe1-4511-9f89-9d0795f063cb" (UID: "29edc9fc-ffe1-4511-9f89-9d0795f063cb"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.116880 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee29c33a-80a8-4f26-81bd-1af50bbbaabb-kube-api-access-dg2tm" (OuterVolumeSpecName: "kube-api-access-dg2tm") pod "ee29c33a-80a8-4f26-81bd-1af50bbbaabb" (UID: "ee29c33a-80a8-4f26-81bd-1af50bbbaabb"). InnerVolumeSpecName "kube-api-access-dg2tm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.117426 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee29c33a-80a8-4f26-81bd-1af50bbbaabb-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "ee29c33a-80a8-4f26-81bd-1af50bbbaabb" (UID: "ee29c33a-80a8-4f26-81bd-1af50bbbaabb"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.156390 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fb745b69-rp7dd" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.183257 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/175271fe-6677-49b5-b497-c45ef1816fb7-combined-ca-bundle\") pod \"175271fe-6677-49b5-b497-c45ef1816fb7\" (UID: \"175271fe-6677-49b5-b497-c45ef1816fb7\") " Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.184676 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nw7t4\" (UniqueName: \"kubernetes.io/projected/175271fe-6677-49b5-b497-c45ef1816fb7-kube-api-access-nw7t4\") pod \"175271fe-6677-49b5-b497-c45ef1816fb7\" (UID: \"175271fe-6677-49b5-b497-c45ef1816fb7\") " Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.184842 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/175271fe-6677-49b5-b497-c45ef1816fb7-db-sync-config-data\") pod \"175271fe-6677-49b5-b497-c45ef1816fb7\" (UID: \"175271fe-6677-49b5-b497-c45ef1816fb7\") " Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.186418 4769 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fc0affe7-9d6e-4a96-90de-f8206cc38627-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.186441 4769 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fc0affe7-9d6e-4a96-90de-f8206cc38627-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.186494 4769 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.186504 4769 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" 
(UniqueName: \"kubernetes.io/secret/ee29c33a-80a8-4f26-81bd-1af50bbbaabb-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.186514 4769 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ee29c33a-80a8-4f26-81bd-1af50bbbaabb-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.186526 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dg2tm\" (UniqueName: \"kubernetes.io/projected/ee29c33a-80a8-4f26-81bd-1af50bbbaabb-kube-api-access-dg2tm\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.186564 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7xgj5\" (UniqueName: \"kubernetes.io/projected/fc0affe7-9d6e-4a96-90de-f8206cc38627-kube-api-access-7xgj5\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.186578 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vvk5z\" (UniqueName: \"kubernetes.io/projected/29edc9fc-ffe1-4511-9f89-9d0795f063cb-kube-api-access-vvk5z\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.186591 4769 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fc0affe7-9d6e-4a96-90de-f8206cc38627-logs\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.186602 4769 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ee29c33a-80a8-4f26-81bd-1af50bbbaabb-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.186638 4769 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/29edc9fc-ffe1-4511-9f89-9d0795f063cb-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.186650 4769 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/29edc9fc-ffe1-4511-9f89-9d0795f063cb-logs\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.204455 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/175271fe-6677-49b5-b497-c45ef1816fb7-kube-api-access-nw7t4" (OuterVolumeSpecName: "kube-api-access-nw7t4") pod "175271fe-6677-49b5-b497-c45ef1816fb7" (UID: "175271fe-6677-49b5-b497-c45ef1816fb7"). InnerVolumeSpecName "kube-api-access-nw7t4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.206608 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/175271fe-6677-49b5-b497-c45ef1816fb7-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "175271fe-6677-49b5-b497-c45ef1816fb7" (UID: "175271fe-6677-49b5-b497-c45ef1816fb7"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.274480 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee29c33a-80a8-4f26-81bd-1af50bbbaabb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ee29c33a-80a8-4f26-81bd-1af50bbbaabb" (UID: "ee29c33a-80a8-4f26-81bd-1af50bbbaabb"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.290640 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3322c8d5-8fb0-4c40-8967-e20d2232a127-ovsdbserver-sb\") pod \"3322c8d5-8fb0-4c40-8967-e20d2232a127\" (UID: \"3322c8d5-8fb0-4c40-8967-e20d2232a127\") " Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.290743 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4lwbz\" (UniqueName: \"kubernetes.io/projected/3322c8d5-8fb0-4c40-8967-e20d2232a127-kube-api-access-4lwbz\") pod \"3322c8d5-8fb0-4c40-8967-e20d2232a127\" (UID: \"3322c8d5-8fb0-4c40-8967-e20d2232a127\") " Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.290780 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3322c8d5-8fb0-4c40-8967-e20d2232a127-dns-svc\") pod \"3322c8d5-8fb0-4c40-8967-e20d2232a127\" (UID: \"3322c8d5-8fb0-4c40-8967-e20d2232a127\") " Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.290868 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3322c8d5-8fb0-4c40-8967-e20d2232a127-ovsdbserver-nb\") pod \"3322c8d5-8fb0-4c40-8967-e20d2232a127\" (UID: \"3322c8d5-8fb0-4c40-8967-e20d2232a127\") " Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.290947 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3322c8d5-8fb0-4c40-8967-e20d2232a127-config\") pod \"3322c8d5-8fb0-4c40-8967-e20d2232a127\" (UID: \"3322c8d5-8fb0-4c40-8967-e20d2232a127\") " Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.296060 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee29c33a-80a8-4f26-81bd-1af50bbbaabb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.296091 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nw7t4\" (UniqueName: \"kubernetes.io/projected/175271fe-6677-49b5-b497-c45ef1816fb7-kube-api-access-nw7t4\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.296103 4769 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/175271fe-6677-49b5-b497-c45ef1816fb7-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.299359 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc0affe7-9d6e-4a96-90de-f8206cc38627-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fc0affe7-9d6e-4a96-90de-f8206cc38627" (UID: "fc0affe7-9d6e-4a96-90de-f8206cc38627"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.303049 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/175271fe-6677-49b5-b497-c45ef1816fb7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "175271fe-6677-49b5-b497-c45ef1816fb7" (UID: "175271fe-6677-49b5-b497-c45ef1816fb7"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.303127 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3322c8d5-8fb0-4c40-8967-e20d2232a127-kube-api-access-4lwbz" (OuterVolumeSpecName: "kube-api-access-4lwbz") pod "3322c8d5-8fb0-4c40-8967-e20d2232a127" (UID: "3322c8d5-8fb0-4c40-8967-e20d2232a127"). InnerVolumeSpecName "kube-api-access-4lwbz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.312501 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29edc9fc-ffe1-4511-9f89-9d0795f063cb-config-data" (OuterVolumeSpecName: "config-data") pod "29edc9fc-ffe1-4511-9f89-9d0795f063cb" (UID: "29edc9fc-ffe1-4511-9f89-9d0795f063cb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.320432 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee29c33a-80a8-4f26-81bd-1af50bbbaabb-config-data" (OuterVolumeSpecName: "config-data") pod "ee29c33a-80a8-4f26-81bd-1af50bbbaabb" (UID: "ee29c33a-80a8-4f26-81bd-1af50bbbaabb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.326690 4769 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.338139 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29edc9fc-ffe1-4511-9f89-9d0795f063cb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "29edc9fc-ffe1-4511-9f89-9d0795f063cb" (UID: "29edc9fc-ffe1-4511-9f89-9d0795f063cb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.372789 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-84b966f6c9-nzkfd"] Nov 25 10:06:45 crc kubenswrapper[4769]: W1125 10:06:45.374692 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod938480bb_3a3b_4131_8fd0_ba63a9b2755a.slice/crio-54f614b2f2cc20d1efa1baa29cba02a70b322f0c2dffeec15224d84d6e68d1ac WatchSource:0}: Error finding container 54f614b2f2cc20d1efa1baa29cba02a70b322f0c2dffeec15224d84d6e68d1ac: Status 404 returned error can't find the container with id 54f614b2f2cc20d1efa1baa29cba02a70b322f0c2dffeec15224d84d6e68d1ac Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.387141 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc0affe7-9d6e-4a96-90de-f8206cc38627-config-data" (OuterVolumeSpecName: "config-data") pod "fc0affe7-9d6e-4a96-90de-f8206cc38627" (UID: "fc0affe7-9d6e-4a96-90de-f8206cc38627"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.394298 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3322c8d5-8fb0-4c40-8967-e20d2232a127-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "3322c8d5-8fb0-4c40-8967-e20d2232a127" (UID: "3322c8d5-8fb0-4c40-8967-e20d2232a127"). 
InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.399025 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fc0affe7-9d6e-4a96-90de-f8206cc38627-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.399086 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc0affe7-9d6e-4a96-90de-f8206cc38627-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.399104 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29edc9fc-ffe1-4511-9f89-9d0795f063cb-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.399113 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29edc9fc-ffe1-4511-9f89-9d0795f063cb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.399122 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee29c33a-80a8-4f26-81bd-1af50bbbaabb-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.399152 4769 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3322c8d5-8fb0-4c40-8967-e20d2232a127-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.399163 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4lwbz\" (UniqueName: \"kubernetes.io/projected/3322c8d5-8fb0-4c40-8967-e20d2232a127-kube-api-access-4lwbz\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.399175 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/175271fe-6677-49b5-b497-c45ef1816fb7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.399185 4769 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.407765 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3322c8d5-8fb0-4c40-8967-e20d2232a127-config" (OuterVolumeSpecName: "config") pod "3322c8d5-8fb0-4c40-8967-e20d2232a127" (UID: "3322c8d5-8fb0-4c40-8967-e20d2232a127"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.425544 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3322c8d5-8fb0-4c40-8967-e20d2232a127-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "3322c8d5-8fb0-4c40-8967-e20d2232a127" (UID: "3322c8d5-8fb0-4c40-8967-e20d2232a127"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.428120 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3322c8d5-8fb0-4c40-8967-e20d2232a127-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "3322c8d5-8fb0-4c40-8967-e20d2232a127" (UID: "3322c8d5-8fb0-4c40-8967-e20d2232a127"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.458733 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 10:06:45 crc kubenswrapper[4769]: W1125 10:06:45.463381 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4ea324d5_69e1_4f0b_982f_15b0de3ec539.slice/crio-b8f07bddbf31933390286a4f7ebde0469d938f43e192602e29eb9d14a2e844c7 WatchSource:0}: Error finding container b8f07bddbf31933390286a4f7ebde0469d938f43e192602e29eb9d14a2e844c7: Status 404 returned error can't find the container with id b8f07bddbf31933390286a4f7ebde0469d938f43e192602e29eb9d14a2e844c7 Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.501788 4769 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3322c8d5-8fb0-4c40-8967-e20d2232a127-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.501853 4769 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3322c8d5-8fb0-4c40-8967-e20d2232a127-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:45 crc kubenswrapper[4769]: I1125 10:06:45.501871 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3322c8d5-8fb0-4c40-8967-e20d2232a127-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:46 crc kubenswrapper[4769]: I1125 10:06:46.242024 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fb745b69-rp7dd" Nov 25 10:06:46 crc kubenswrapper[4769]: I1125 10:06:46.298328 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 10:06:46 crc kubenswrapper[4769]: I1125 10:06:46.318889 4769 generic.go:334] "Generic (PLEG): container finished" podID="938480bb-3a3b-4131-8fd0-ba63a9b2755a" containerID="8380888e60f40e13b058e46ee97f87cdc42f4225d6d7c154562513af04d43536" exitCode=0 Nov 25 10:06:46 crc kubenswrapper[4769]: I1125 10:06:46.482397 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-9zbdp" Nov 25 10:06:46 crc kubenswrapper[4769]: I1125 10:06:46.482555 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-pvsgh" Nov 25 10:06:46 crc kubenswrapper[4769]: I1125 10:06:46.482365 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-d4dbd5b55-g2prp"] Nov 25 10:06:46 crc kubenswrapper[4769]: E1125 10:06:46.485024 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc0affe7-9d6e-4a96-90de-f8206cc38627" containerName="glance-log" Nov 25 10:06:46 crc kubenswrapper[4769]: I1125 10:06:46.485043 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc0affe7-9d6e-4a96-90de-f8206cc38627" containerName="glance-log" Nov 25 10:06:46 crc kubenswrapper[4769]: E1125 10:06:46.485073 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29edc9fc-ffe1-4511-9f89-9d0795f063cb" containerName="placement-db-sync" Nov 25 10:06:46 crc kubenswrapper[4769]: I1125 10:06:46.485081 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="29edc9fc-ffe1-4511-9f89-9d0795f063cb" containerName="placement-db-sync" Nov 25 10:06:46 crc kubenswrapper[4769]: E1125 10:06:46.485097 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3322c8d5-8fb0-4c40-8967-e20d2232a127" containerName="dnsmasq-dns" Nov 25 10:06:46 crc kubenswrapper[4769]: I1125 10:06:46.485108 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="3322c8d5-8fb0-4c40-8967-e20d2232a127" containerName="dnsmasq-dns" Nov 25 10:06:46 crc kubenswrapper[4769]: E1125 10:06:46.485139 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3322c8d5-8fb0-4c40-8967-e20d2232a127" containerName="init" Nov 25 10:06:46 crc kubenswrapper[4769]: I1125 10:06:46.485149 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="3322c8d5-8fb0-4c40-8967-e20d2232a127" containerName="init" Nov 25 10:06:46 crc kubenswrapper[4769]: E1125 10:06:46.485175 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="175271fe-6677-49b5-b497-c45ef1816fb7" containerName="barbican-db-sync" Nov 25 10:06:46 crc kubenswrapper[4769]: I1125 10:06:46.485186 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="175271fe-6677-49b5-b497-c45ef1816fb7" containerName="barbican-db-sync" Nov 25 10:06:46 crc kubenswrapper[4769]: E1125 10:06:46.485200 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc0affe7-9d6e-4a96-90de-f8206cc38627" containerName="glance-httpd" Nov 25 10:06:46 crc kubenswrapper[4769]: I1125 10:06:46.485208 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc0affe7-9d6e-4a96-90de-f8206cc38627" containerName="glance-httpd" Nov 25 10:06:46 crc kubenswrapper[4769]: E1125 10:06:46.485226 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee29c33a-80a8-4f26-81bd-1af50bbbaabb" containerName="keystone-bootstrap" Nov 25 10:06:46 crc kubenswrapper[4769]: I1125 10:06:46.485233 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee29c33a-80a8-4f26-81bd-1af50bbbaabb" containerName="keystone-bootstrap" Nov 25 10:06:46 crc kubenswrapper[4769]: I1125 10:06:46.485527 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee29c33a-80a8-4f26-81bd-1af50bbbaabb" containerName="keystone-bootstrap" Nov 25 10:06:46 crc kubenswrapper[4769]: I1125 10:06:46.485543 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="fc0affe7-9d6e-4a96-90de-f8206cc38627" containerName="glance-log" Nov 25 10:06:46 crc kubenswrapper[4769]: I1125 10:06:46.485559 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="175271fe-6677-49b5-b497-c45ef1816fb7" 
containerName="barbican-db-sync" Nov 25 10:06:46 crc kubenswrapper[4769]: I1125 10:06:46.485568 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="fc0affe7-9d6e-4a96-90de-f8206cc38627" containerName="glance-httpd" Nov 25 10:06:46 crc kubenswrapper[4769]: I1125 10:06:46.485576 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="29edc9fc-ffe1-4511-9f89-9d0795f063cb" containerName="placement-db-sync" Nov 25 10:06:46 crc kubenswrapper[4769]: I1125 10:06:46.485597 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="3322c8d5-8fb0-4c40-8967-e20d2232a127" containerName="dnsmasq-dns" Nov 25 10:06:46 crc kubenswrapper[4769]: I1125 10:06:46.489379 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fb745b69-rp7dd" event={"ID":"3322c8d5-8fb0-4c40-8967-e20d2232a127","Type":"ContainerDied","Data":"13f2654de99b4fcf7f1eb8972989548c88e7a0dc893d68076668966157aa8493"} Nov 25 10:06:46 crc kubenswrapper[4769]: I1125 10:06:46.489436 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-5tt9b" event={"ID":"639faafa-26cf-4b2e-831b-bc95e327cb3b","Type":"ContainerStarted","Data":"6e87b8e8e24fee05ec911efe798d5d590b46bd2fd850b4c0f1cd1a9ff1457c3d"} Nov 25 10:06:46 crc kubenswrapper[4769]: I1125 10:06:46.489620 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84b966f6c9-nzkfd" event={"ID":"938480bb-3a3b-4131-8fd0-ba63a9b2755a","Type":"ContainerDied","Data":"8380888e60f40e13b058e46ee97f87cdc42f4225d6d7c154562513af04d43536"} Nov 25 10:06:46 crc kubenswrapper[4769]: I1125 10:06:46.489638 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84b966f6c9-nzkfd" event={"ID":"938480bb-3a3b-4131-8fd0-ba63a9b2755a","Type":"ContainerStarted","Data":"54f614b2f2cc20d1efa1baa29cba02a70b322f0c2dffeec15224d84d6e68d1ac"} Nov 25 10:06:46 crc kubenswrapper[4769]: I1125 10:06:46.489649 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4ea324d5-69e1-4f0b-982f-15b0de3ec539","Type":"ContainerStarted","Data":"b8f07bddbf31933390286a4f7ebde0469d938f43e192602e29eb9d14a2e844c7"} Nov 25 10:06:46 crc kubenswrapper[4769]: I1125 10:06:46.489661 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3373fd6b-9a0f-4268-8476-382740118f35","Type":"ContainerStarted","Data":"b060d04afd82927b6a9152e744e144cbe21ee13ef0ff7b33a921286f098efaf1"} Nov 25 10:06:46 crc kubenswrapper[4769]: I1125 10:06:46.491055 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-d4dbd5b55-g2prp" Nov 25 10:06:46 crc kubenswrapper[4769]: I1125 10:06:46.491141 4769 scope.go:117] "RemoveContainer" containerID="8813b534ff4b312a836881c16516d575312a5dbd84c70c6c4772b3670e7f9703" Nov 25 10:06:46 crc kubenswrapper[4769]: I1125 10:06:46.524975 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 10:06:46 crc kubenswrapper[4769]: I1125 10:06:46.525250 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Nov 25 10:06:46 crc kubenswrapper[4769]: I1125 10:06:46.525318 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Nov 25 10:06:46 crc kubenswrapper[4769]: I1125 10:06:46.525370 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 10:06:46 crc kubenswrapper[4769]: I1125 10:06:46.526073 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 10:06:46 crc kubenswrapper[4769]: I1125 10:06:46.526336 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-bnc4t" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.600415 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-68856897d4-664jr"] Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.602796 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-68856897d4-664jr" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.608602 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.608840 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.614695 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.615009 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.615822 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-zhjgb" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.684216 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-d4dbd5b55-g2prp"] Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.708268 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/43d17ac5-45bb-488c-b28e-d076b6f279d8-scripts\") pod \"keystone-d4dbd5b55-g2prp\" (UID: \"43d17ac5-45bb-488c-b28e-d076b6f279d8\") " pod="openstack/keystone-d4dbd5b55-g2prp" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.708301 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lbbt4\" (UniqueName: \"kubernetes.io/projected/43d17ac5-45bb-488c-b28e-d076b6f279d8-kube-api-access-lbbt4\") pod \"keystone-d4dbd5b55-g2prp\" (UID: \"43d17ac5-45bb-488c-b28e-d076b6f279d8\") " pod="openstack/keystone-d4dbd5b55-g2prp" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.708340 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"credential-keys\" (UniqueName: \"kubernetes.io/secret/43d17ac5-45bb-488c-b28e-d076b6f279d8-credential-keys\") pod \"keystone-d4dbd5b55-g2prp\" (UID: \"43d17ac5-45bb-488c-b28e-d076b6f279d8\") " pod="openstack/keystone-d4dbd5b55-g2prp" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.708402 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43d17ac5-45bb-488c-b28e-d076b6f279d8-config-data\") pod \"keystone-d4dbd5b55-g2prp\" (UID: \"43d17ac5-45bb-488c-b28e-d076b6f279d8\") " pod="openstack/keystone-d4dbd5b55-g2prp" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.708431 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/43d17ac5-45bb-488c-b28e-d076b6f279d8-public-tls-certs\") pod \"keystone-d4dbd5b55-g2prp\" (UID: \"43d17ac5-45bb-488c-b28e-d076b6f279d8\") " pod="openstack/keystone-d4dbd5b55-g2prp" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.708492 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/43d17ac5-45bb-488c-b28e-d076b6f279d8-fernet-keys\") pod \"keystone-d4dbd5b55-g2prp\" (UID: \"43d17ac5-45bb-488c-b28e-d076b6f279d8\") " pod="openstack/keystone-d4dbd5b55-g2prp" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.708634 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/43d17ac5-45bb-488c-b28e-d076b6f279d8-internal-tls-certs\") pod \"keystone-d4dbd5b55-g2prp\" (UID: \"43d17ac5-45bb-488c-b28e-d076b6f279d8\") " pod="openstack/keystone-d4dbd5b55-g2prp" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.708661 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43d17ac5-45bb-488c-b28e-d076b6f279d8-combined-ca-bundle\") pod \"keystone-d4dbd5b55-g2prp\" (UID: \"43d17ac5-45bb-488c-b28e-d076b6f279d8\") " pod="openstack/keystone-d4dbd5b55-g2prp" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.732510 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-68856897d4-664jr"] Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.780926 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-fb745b69-rp7dd"] Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.812897 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3e7e492-3f47-4688-b589-0c61adf29521-internal-tls-certs\") pod \"placement-68856897d4-664jr\" (UID: \"e3e7e492-3f47-4688-b589-0c61adf29521\") " pod="openstack/placement-68856897d4-664jr" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.812942 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3e7e492-3f47-4688-b589-0c61adf29521-public-tls-certs\") pod \"placement-68856897d4-664jr\" (UID: \"e3e7e492-3f47-4688-b589-0c61adf29521\") " pod="openstack/placement-68856897d4-664jr" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.812976 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/e3e7e492-3f47-4688-b589-0c61adf29521-config-data\") pod \"placement-68856897d4-664jr\" (UID: \"e3e7e492-3f47-4688-b589-0c61adf29521\") " pod="openstack/placement-68856897d4-664jr" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.813023 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e3e7e492-3f47-4688-b589-0c61adf29521-logs\") pod \"placement-68856897d4-664jr\" (UID: \"e3e7e492-3f47-4688-b589-0c61adf29521\") " pod="openstack/placement-68856897d4-664jr" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.813047 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/43d17ac5-45bb-488c-b28e-d076b6f279d8-scripts\") pod \"keystone-d4dbd5b55-g2prp\" (UID: \"43d17ac5-45bb-488c-b28e-d076b6f279d8\") " pod="openstack/keystone-d4dbd5b55-g2prp" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.813066 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lbbt4\" (UniqueName: \"kubernetes.io/projected/43d17ac5-45bb-488c-b28e-d076b6f279d8-kube-api-access-lbbt4\") pod \"keystone-d4dbd5b55-g2prp\" (UID: \"43d17ac5-45bb-488c-b28e-d076b6f279d8\") " pod="openstack/keystone-d4dbd5b55-g2prp" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.813085 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/43d17ac5-45bb-488c-b28e-d076b6f279d8-credential-keys\") pod \"keystone-d4dbd5b55-g2prp\" (UID: \"43d17ac5-45bb-488c-b28e-d076b6f279d8\") " pod="openstack/keystone-d4dbd5b55-g2prp" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.813117 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3e7e492-3f47-4688-b589-0c61adf29521-combined-ca-bundle\") pod \"placement-68856897d4-664jr\" (UID: \"e3e7e492-3f47-4688-b589-0c61adf29521\") " pod="openstack/placement-68856897d4-664jr" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.813142 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43d17ac5-45bb-488c-b28e-d076b6f279d8-config-data\") pod \"keystone-d4dbd5b55-g2prp\" (UID: \"43d17ac5-45bb-488c-b28e-d076b6f279d8\") " pod="openstack/keystone-d4dbd5b55-g2prp" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.813161 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/43d17ac5-45bb-488c-b28e-d076b6f279d8-public-tls-certs\") pod \"keystone-d4dbd5b55-g2prp\" (UID: \"43d17ac5-45bb-488c-b28e-d076b6f279d8\") " pod="openstack/keystone-d4dbd5b55-g2prp" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.813202 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/43d17ac5-45bb-488c-b28e-d076b6f279d8-fernet-keys\") pod \"keystone-d4dbd5b55-g2prp\" (UID: \"43d17ac5-45bb-488c-b28e-d076b6f279d8\") " pod="openstack/keystone-d4dbd5b55-g2prp" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.813224 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-llz68\" (UniqueName: 
\"kubernetes.io/projected/e3e7e492-3f47-4688-b589-0c61adf29521-kube-api-access-llz68\") pod \"placement-68856897d4-664jr\" (UID: \"e3e7e492-3f47-4688-b589-0c61adf29521\") " pod="openstack/placement-68856897d4-664jr" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.813243 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e3e7e492-3f47-4688-b589-0c61adf29521-scripts\") pod \"placement-68856897d4-664jr\" (UID: \"e3e7e492-3f47-4688-b589-0c61adf29521\") " pod="openstack/placement-68856897d4-664jr" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.813302 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/43d17ac5-45bb-488c-b28e-d076b6f279d8-internal-tls-certs\") pod \"keystone-d4dbd5b55-g2prp\" (UID: \"43d17ac5-45bb-488c-b28e-d076b6f279d8\") " pod="openstack/keystone-d4dbd5b55-g2prp" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.813327 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43d17ac5-45bb-488c-b28e-d076b6f279d8-combined-ca-bundle\") pod \"keystone-d4dbd5b55-g2prp\" (UID: \"43d17ac5-45bb-488c-b28e-d076b6f279d8\") " pod="openstack/keystone-d4dbd5b55-g2prp" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.834656 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/43d17ac5-45bb-488c-b28e-d076b6f279d8-public-tls-certs\") pod \"keystone-d4dbd5b55-g2prp\" (UID: \"43d17ac5-45bb-488c-b28e-d076b6f279d8\") " pod="openstack/keystone-d4dbd5b55-g2prp" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.834691 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/43d17ac5-45bb-488c-b28e-d076b6f279d8-fernet-keys\") pod \"keystone-d4dbd5b55-g2prp\" (UID: \"43d17ac5-45bb-488c-b28e-d076b6f279d8\") " pod="openstack/keystone-d4dbd5b55-g2prp" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.835316 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43d17ac5-45bb-488c-b28e-d076b6f279d8-config-data\") pod \"keystone-d4dbd5b55-g2prp\" (UID: \"43d17ac5-45bb-488c-b28e-d076b6f279d8\") " pod="openstack/keystone-d4dbd5b55-g2prp" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.839680 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/43d17ac5-45bb-488c-b28e-d076b6f279d8-credential-keys\") pod \"keystone-d4dbd5b55-g2prp\" (UID: \"43d17ac5-45bb-488c-b28e-d076b6f279d8\") " pod="openstack/keystone-d4dbd5b55-g2prp" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.845553 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43d17ac5-45bb-488c-b28e-d076b6f279d8-combined-ca-bundle\") pod \"keystone-d4dbd5b55-g2prp\" (UID: \"43d17ac5-45bb-488c-b28e-d076b6f279d8\") " pod="openstack/keystone-d4dbd5b55-g2prp" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.857637 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/43d17ac5-45bb-488c-b28e-d076b6f279d8-internal-tls-certs\") pod \"keystone-d4dbd5b55-g2prp\" (UID: 
\"43d17ac5-45bb-488c-b28e-d076b6f279d8\") " pod="openstack/keystone-d4dbd5b55-g2prp" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.860860 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/43d17ac5-45bb-488c-b28e-d076b6f279d8-scripts\") pod \"keystone-d4dbd5b55-g2prp\" (UID: \"43d17ac5-45bb-488c-b28e-d076b6f279d8\") " pod="openstack/keystone-d4dbd5b55-g2prp" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.865691 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lbbt4\" (UniqueName: \"kubernetes.io/projected/43d17ac5-45bb-488c-b28e-d076b6f279d8-kube-api-access-lbbt4\") pod \"keystone-d4dbd5b55-g2prp\" (UID: \"43d17ac5-45bb-488c-b28e-d076b6f279d8\") " pod="openstack/keystone-d4dbd5b55-g2prp" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.878362 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-fb745b69-rp7dd"] Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.916343 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3e7e492-3f47-4688-b589-0c61adf29521-internal-tls-certs\") pod \"placement-68856897d4-664jr\" (UID: \"e3e7e492-3f47-4688-b589-0c61adf29521\") " pod="openstack/placement-68856897d4-664jr" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.916417 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3e7e492-3f47-4688-b589-0c61adf29521-public-tls-certs\") pod \"placement-68856897d4-664jr\" (UID: \"e3e7e492-3f47-4688-b589-0c61adf29521\") " pod="openstack/placement-68856897d4-664jr" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.916450 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e3e7e492-3f47-4688-b589-0c61adf29521-config-data\") pod \"placement-68856897d4-664jr\" (UID: \"e3e7e492-3f47-4688-b589-0c61adf29521\") " pod="openstack/placement-68856897d4-664jr" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.916505 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e3e7e492-3f47-4688-b589-0c61adf29521-logs\") pod \"placement-68856897d4-664jr\" (UID: \"e3e7e492-3f47-4688-b589-0c61adf29521\") " pod="openstack/placement-68856897d4-664jr" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.916563 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3e7e492-3f47-4688-b589-0c61adf29521-combined-ca-bundle\") pod \"placement-68856897d4-664jr\" (UID: \"e3e7e492-3f47-4688-b589-0c61adf29521\") " pod="openstack/placement-68856897d4-664jr" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.922720 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e3e7e492-3f47-4688-b589-0c61adf29521-logs\") pod \"placement-68856897d4-664jr\" (UID: \"e3e7e492-3f47-4688-b589-0c61adf29521\") " pod="openstack/placement-68856897d4-664jr" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.925144 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3e7e492-3f47-4688-b589-0c61adf29521-public-tls-certs\") pod \"placement-68856897d4-664jr\" (UID: 
\"e3e7e492-3f47-4688-b589-0c61adf29521\") " pod="openstack/placement-68856897d4-664jr" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.932343 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e3e7e492-3f47-4688-b589-0c61adf29521-config-data\") pod \"placement-68856897d4-664jr\" (UID: \"e3e7e492-3f47-4688-b589-0c61adf29521\") " pod="openstack/placement-68856897d4-664jr" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.937192 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-llz68\" (UniqueName: \"kubernetes.io/projected/e3e7e492-3f47-4688-b589-0c61adf29521-kube-api-access-llz68\") pod \"placement-68856897d4-664jr\" (UID: \"e3e7e492-3f47-4688-b589-0c61adf29521\") " pod="openstack/placement-68856897d4-664jr" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.937254 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e3e7e492-3f47-4688-b589-0c61adf29521-scripts\") pod \"placement-68856897d4-664jr\" (UID: \"e3e7e492-3f47-4688-b589-0c61adf29521\") " pod="openstack/placement-68856897d4-664jr" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.941734 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3e7e492-3f47-4688-b589-0c61adf29521-combined-ca-bundle\") pod \"placement-68856897d4-664jr\" (UID: \"e3e7e492-3f47-4688-b589-0c61adf29521\") " pod="openstack/placement-68856897d4-664jr" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.943232 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3e7e492-3f47-4688-b589-0c61adf29521-internal-tls-certs\") pod \"placement-68856897d4-664jr\" (UID: \"e3e7e492-3f47-4688-b589-0c61adf29521\") " pod="openstack/placement-68856897d4-664jr" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.945996 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e3e7e492-3f47-4688-b589-0c61adf29521-scripts\") pod \"placement-68856897d4-664jr\" (UID: \"e3e7e492-3f47-4688-b589-0c61adf29521\") " pod="openstack/placement-68856897d4-664jr" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.956277 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-7ff7bc75d-dxtln"] Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.959265 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-7ff7bc75d-dxtln" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.966655 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.966980 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-6l42b" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.967197 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.970133 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-llz68\" (UniqueName: \"kubernetes.io/projected/e3e7e492-3f47-4688-b589-0c61adf29521-kube-api-access-llz68\") pod \"placement-68856897d4-664jr\" (UID: \"e3e7e492-3f47-4688-b589-0c61adf29521\") " pod="openstack/placement-68856897d4-664jr" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:46.972122 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-7ff7bc75d-dxtln"] Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.046602 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-74bd96895f-7gn8z"] Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.046772 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e7bfc1de-a2ad-4c42-ab54-a4df40fdd797-config-data\") pod \"barbican-keystone-listener-7ff7bc75d-dxtln\" (UID: \"e7bfc1de-a2ad-4c42-ab54-a4df40fdd797\") " pod="openstack/barbican-keystone-listener-7ff7bc75d-dxtln" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.046826 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e7bfc1de-a2ad-4c42-ab54-a4df40fdd797-logs\") pod \"barbican-keystone-listener-7ff7bc75d-dxtln\" (UID: \"e7bfc1de-a2ad-4c42-ab54-a4df40fdd797\") " pod="openstack/barbican-keystone-listener-7ff7bc75d-dxtln" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.046876 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hv9jf\" (UniqueName: \"kubernetes.io/projected/e7bfc1de-a2ad-4c42-ab54-a4df40fdd797-kube-api-access-hv9jf\") pod \"barbican-keystone-listener-7ff7bc75d-dxtln\" (UID: \"e7bfc1de-a2ad-4c42-ab54-a4df40fdd797\") " pod="openstack/barbican-keystone-listener-7ff7bc75d-dxtln" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.049072 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-74bd96895f-7gn8z" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.053292 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e7bfc1de-a2ad-4c42-ab54-a4df40fdd797-config-data-custom\") pod \"barbican-keystone-listener-7ff7bc75d-dxtln\" (UID: \"e7bfc1de-a2ad-4c42-ab54-a4df40fdd797\") " pod="openstack/barbican-keystone-listener-7ff7bc75d-dxtln" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.054398 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.054588 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7bfc1de-a2ad-4c42-ab54-a4df40fdd797-combined-ca-bundle\") pod \"barbican-keystone-listener-7ff7bc75d-dxtln\" (UID: \"e7bfc1de-a2ad-4c42-ab54-a4df40fdd797\") " pod="openstack/barbican-keystone-listener-7ff7bc75d-dxtln" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.071371 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-74bd96895f-7gn8z"] Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.134563 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.151068 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.157425 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e7bfc1de-a2ad-4c42-ab54-a4df40fdd797-config-data\") pod \"barbican-keystone-listener-7ff7bc75d-dxtln\" (UID: \"e7bfc1de-a2ad-4c42-ab54-a4df40fdd797\") " pod="openstack/barbican-keystone-listener-7ff7bc75d-dxtln" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.157487 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e7bfc1de-a2ad-4c42-ab54-a4df40fdd797-logs\") pod \"barbican-keystone-listener-7ff7bc75d-dxtln\" (UID: \"e7bfc1de-a2ad-4c42-ab54-a4df40fdd797\") " pod="openstack/barbican-keystone-listener-7ff7bc75d-dxtln" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.157524 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hv9jf\" (UniqueName: \"kubernetes.io/projected/e7bfc1de-a2ad-4c42-ab54-a4df40fdd797-kube-api-access-hv9jf\") pod \"barbican-keystone-listener-7ff7bc75d-dxtln\" (UID: \"e7bfc1de-a2ad-4c42-ab54-a4df40fdd797\") " pod="openstack/barbican-keystone-listener-7ff7bc75d-dxtln" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.157555 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1d5d3ba2-441b-4a94-974d-142811c1d2b4-logs\") pod \"barbican-worker-74bd96895f-7gn8z\" (UID: \"1d5d3ba2-441b-4a94-974d-142811c1d2b4\") " pod="openstack/barbican-worker-74bd96895f-7gn8z" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.157574 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1d5d3ba2-441b-4a94-974d-142811c1d2b4-config-data\") pod 
\"barbican-worker-74bd96895f-7gn8z\" (UID: \"1d5d3ba2-441b-4a94-974d-142811c1d2b4\") " pod="openstack/barbican-worker-74bd96895f-7gn8z" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.157598 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d5d3ba2-441b-4a94-974d-142811c1d2b4-combined-ca-bundle\") pod \"barbican-worker-74bd96895f-7gn8z\" (UID: \"1d5d3ba2-441b-4a94-974d-142811c1d2b4\") " pod="openstack/barbican-worker-74bd96895f-7gn8z" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.157678 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1d5d3ba2-441b-4a94-974d-142811c1d2b4-config-data-custom\") pod \"barbican-worker-74bd96895f-7gn8z\" (UID: \"1d5d3ba2-441b-4a94-974d-142811c1d2b4\") " pod="openstack/barbican-worker-74bd96895f-7gn8z" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.157706 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2hzk6\" (UniqueName: \"kubernetes.io/projected/1d5d3ba2-441b-4a94-974d-142811c1d2b4-kube-api-access-2hzk6\") pod \"barbican-worker-74bd96895f-7gn8z\" (UID: \"1d5d3ba2-441b-4a94-974d-142811c1d2b4\") " pod="openstack/barbican-worker-74bd96895f-7gn8z" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.157732 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e7bfc1de-a2ad-4c42-ab54-a4df40fdd797-config-data-custom\") pod \"barbican-keystone-listener-7ff7bc75d-dxtln\" (UID: \"e7bfc1de-a2ad-4c42-ab54-a4df40fdd797\") " pod="openstack/barbican-keystone-listener-7ff7bc75d-dxtln" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.157800 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7bfc1de-a2ad-4c42-ab54-a4df40fdd797-combined-ca-bundle\") pod \"barbican-keystone-listener-7ff7bc75d-dxtln\" (UID: \"e7bfc1de-a2ad-4c42-ab54-a4df40fdd797\") " pod="openstack/barbican-keystone-listener-7ff7bc75d-dxtln" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.163514 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e7bfc1de-a2ad-4c42-ab54-a4df40fdd797-logs\") pod \"barbican-keystone-listener-7ff7bc75d-dxtln\" (UID: \"e7bfc1de-a2ad-4c42-ab54-a4df40fdd797\") " pod="openstack/barbican-keystone-listener-7ff7bc75d-dxtln" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.175027 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e7bfc1de-a2ad-4c42-ab54-a4df40fdd797-config-data\") pod \"barbican-keystone-listener-7ff7bc75d-dxtln\" (UID: \"e7bfc1de-a2ad-4c42-ab54-a4df40fdd797\") " pod="openstack/barbican-keystone-listener-7ff7bc75d-dxtln" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.174823 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e7bfc1de-a2ad-4c42-ab54-a4df40fdd797-combined-ca-bundle\") pod \"barbican-keystone-listener-7ff7bc75d-dxtln\" (UID: \"e7bfc1de-a2ad-4c42-ab54-a4df40fdd797\") " pod="openstack/barbican-keystone-listener-7ff7bc75d-dxtln" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.180843 4769 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e7bfc1de-a2ad-4c42-ab54-a4df40fdd797-config-data-custom\") pod \"barbican-keystone-listener-7ff7bc75d-dxtln\" (UID: \"e7bfc1de-a2ad-4c42-ab54-a4df40fdd797\") " pod="openstack/barbican-keystone-listener-7ff7bc75d-dxtln" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.186447 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-84b966f6c9-nzkfd"] Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.199599 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hv9jf\" (UniqueName: \"kubernetes.io/projected/e7bfc1de-a2ad-4c42-ab54-a4df40fdd797-kube-api-access-hv9jf\") pod \"barbican-keystone-listener-7ff7bc75d-dxtln\" (UID: \"e7bfc1de-a2ad-4c42-ab54-a4df40fdd797\") " pod="openstack/barbican-keystone-listener-7ff7bc75d-dxtln" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.210746 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.212384 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-sync-5tt9b" podStartSLOduration=5.979647754 podStartE2EDuration="52.211938721s" podCreationTimestamp="2025-11-25 10:05:55 +0000 UTC" firstStartedPulling="2025-11-25 10:05:58.555854394 +0000 UTC m=+1307.140826707" lastFinishedPulling="2025-11-25 10:06:44.788145361 +0000 UTC m=+1353.373117674" observedRunningTime="2025-11-25 10:06:46.601630959 +0000 UTC m=+1355.186603272" watchObservedRunningTime="2025-11-25 10:06:47.211938721 +0000 UTC m=+1355.796911034" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.213543 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.217866 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.218301 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.260401 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1d5d3ba2-441b-4a94-974d-142811c1d2b4-logs\") pod \"barbican-worker-74bd96895f-7gn8z\" (UID: \"1d5d3ba2-441b-4a94-974d-142811c1d2b4\") " pod="openstack/barbican-worker-74bd96895f-7gn8z" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.260453 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1d5d3ba2-441b-4a94-974d-142811c1d2b4-config-data\") pod \"barbican-worker-74bd96895f-7gn8z\" (UID: \"1d5d3ba2-441b-4a94-974d-142811c1d2b4\") " pod="openstack/barbican-worker-74bd96895f-7gn8z" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.260499 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d5d3ba2-441b-4a94-974d-142811c1d2b4-combined-ca-bundle\") pod \"barbican-worker-74bd96895f-7gn8z\" (UID: \"1d5d3ba2-441b-4a94-974d-142811c1d2b4\") " pod="openstack/barbican-worker-74bd96895f-7gn8z" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.260573 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1d5d3ba2-441b-4a94-974d-142811c1d2b4-config-data-custom\") pod \"barbican-worker-74bd96895f-7gn8z\" (UID: \"1d5d3ba2-441b-4a94-974d-142811c1d2b4\") " pod="openstack/barbican-worker-74bd96895f-7gn8z" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.260595 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2hzk6\" (UniqueName: \"kubernetes.io/projected/1d5d3ba2-441b-4a94-974d-142811c1d2b4-kube-api-access-2hzk6\") pod \"barbican-worker-74bd96895f-7gn8z\" (UID: \"1d5d3ba2-441b-4a94-974d-142811c1d2b4\") " pod="openstack/barbican-worker-74bd96895f-7gn8z" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.261593 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1d5d3ba2-441b-4a94-974d-142811c1d2b4-logs\") pod \"barbican-worker-74bd96895f-7gn8z\" (UID: \"1d5d3ba2-441b-4a94-974d-142811c1d2b4\") " pod="openstack/barbican-worker-74bd96895f-7gn8z" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.271641 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d5d3ba2-441b-4a94-974d-142811c1d2b4-combined-ca-bundle\") pod \"barbican-worker-74bd96895f-7gn8z\" (UID: \"1d5d3ba2-441b-4a94-974d-142811c1d2b4\") " pod="openstack/barbican-worker-74bd96895f-7gn8z" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.273391 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1d5d3ba2-441b-4a94-974d-142811c1d2b4-config-data-custom\") pod \"barbican-worker-74bd96895f-7gn8z\" (UID: \"1d5d3ba2-441b-4a94-974d-142811c1d2b4\") " 
pod="openstack/barbican-worker-74bd96895f-7gn8z" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.286761 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.290048 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2hzk6\" (UniqueName: \"kubernetes.io/projected/1d5d3ba2-441b-4a94-974d-142811c1d2b4-kube-api-access-2hzk6\") pod \"barbican-worker-74bd96895f-7gn8z\" (UID: \"1d5d3ba2-441b-4a94-974d-142811c1d2b4\") " pod="openstack/barbican-worker-74bd96895f-7gn8z" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.290397 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1d5d3ba2-441b-4a94-974d-142811c1d2b4-config-data\") pod \"barbican-worker-74bd96895f-7gn8z\" (UID: \"1d5d3ba2-441b-4a94-974d-142811c1d2b4\") " pod="openstack/barbican-worker-74bd96895f-7gn8z" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.310999 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-75c8ddd69c-8tf2v"] Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.313078 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75c8ddd69c-8tf2v" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.353231 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-75c8ddd69c-8tf2v"] Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.363748 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ea2eb46-81be-409a-9f6b-8775f5458372-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"4ea2eb46-81be-409a-9f6b-8775f5458372\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.363863 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4ea2eb46-81be-409a-9f6b-8775f5458372-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"4ea2eb46-81be-409a-9f6b-8775f5458372\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.363900 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"4ea2eb46-81be-409a-9f6b-8775f5458372\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.363992 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ea2eb46-81be-409a-9f6b-8775f5458372-config-data\") pod \"glance-default-internal-api-0\" (UID: \"4ea2eb46-81be-409a-9f6b-8775f5458372\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.364050 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4ea2eb46-81be-409a-9f6b-8775f5458372-logs\") pod \"glance-default-internal-api-0\" (UID: \"4ea2eb46-81be-409a-9f6b-8775f5458372\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 
10:06:47.364085 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4ea2eb46-81be-409a-9f6b-8775f5458372-scripts\") pod \"glance-default-internal-api-0\" (UID: \"4ea2eb46-81be-409a-9f6b-8775f5458372\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.365161 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4ea2eb46-81be-409a-9f6b-8775f5458372-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"4ea2eb46-81be-409a-9f6b-8775f5458372\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.365233 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7gfn6\" (UniqueName: \"kubernetes.io/projected/4ea2eb46-81be-409a-9f6b-8775f5458372-kube-api-access-7gfn6\") pod \"glance-default-internal-api-0\" (UID: \"4ea2eb46-81be-409a-9f6b-8775f5458372\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.374931 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-88f84b978-bzvgm"] Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.378336 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-88f84b978-bzvgm" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.383805 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.402534 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-88f84b978-bzvgm"] Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.412157 4769 scope.go:117] "RemoveContainer" containerID="102271a9bdb006e90ce1d9e16539600b102d1e5bbd3774642da00123c70da811" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.444211 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-d4dbd5b55-g2prp" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.482105 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/42db85a3-0c1c-416f-b1a0-857c750bb2c8-config\") pod \"dnsmasq-dns-75c8ddd69c-8tf2v\" (UID: \"42db85a3-0c1c-416f-b1a0-857c750bb2c8\") " pod="openstack/dnsmasq-dns-75c8ddd69c-8tf2v" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.482152 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/42db85a3-0c1c-416f-b1a0-857c750bb2c8-ovsdbserver-nb\") pod \"dnsmasq-dns-75c8ddd69c-8tf2v\" (UID: \"42db85a3-0c1c-416f-b1a0-857c750bb2c8\") " pod="openstack/dnsmasq-dns-75c8ddd69c-8tf2v" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.482275 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ea2eb46-81be-409a-9f6b-8775f5458372-config-data\") pod \"glance-default-internal-api-0\" (UID: \"4ea2eb46-81be-409a-9f6b-8775f5458372\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.482436 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4ea2eb46-81be-409a-9f6b-8775f5458372-logs\") pod \"glance-default-internal-api-0\" (UID: \"4ea2eb46-81be-409a-9f6b-8775f5458372\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.482479 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4ea2eb46-81be-409a-9f6b-8775f5458372-scripts\") pod \"glance-default-internal-api-0\" (UID: \"4ea2eb46-81be-409a-9f6b-8775f5458372\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.482607 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4ea2eb46-81be-409a-9f6b-8775f5458372-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"4ea2eb46-81be-409a-9f6b-8775f5458372\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.482643 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7gfn6\" (UniqueName: \"kubernetes.io/projected/4ea2eb46-81be-409a-9f6b-8775f5458372-kube-api-access-7gfn6\") pod \"glance-default-internal-api-0\" (UID: \"4ea2eb46-81be-409a-9f6b-8775f5458372\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.482725 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7kv2q\" (UniqueName: \"kubernetes.io/projected/42db85a3-0c1c-416f-b1a0-857c750bb2c8-kube-api-access-7kv2q\") pod \"dnsmasq-dns-75c8ddd69c-8tf2v\" (UID: \"42db85a3-0c1c-416f-b1a0-857c750bb2c8\") " pod="openstack/dnsmasq-dns-75c8ddd69c-8tf2v" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.483077 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/42db85a3-0c1c-416f-b1a0-857c750bb2c8-dns-swift-storage-0\") pod \"dnsmasq-dns-75c8ddd69c-8tf2v\" (UID: 
\"42db85a3-0c1c-416f-b1a0-857c750bb2c8\") " pod="openstack/dnsmasq-dns-75c8ddd69c-8tf2v" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.483248 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ea2eb46-81be-409a-9f6b-8775f5458372-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"4ea2eb46-81be-409a-9f6b-8775f5458372\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.483342 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/42db85a3-0c1c-416f-b1a0-857c750bb2c8-dns-svc\") pod \"dnsmasq-dns-75c8ddd69c-8tf2v\" (UID: \"42db85a3-0c1c-416f-b1a0-857c750bb2c8\") " pod="openstack/dnsmasq-dns-75c8ddd69c-8tf2v" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.483406 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4ea2eb46-81be-409a-9f6b-8775f5458372-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"4ea2eb46-81be-409a-9f6b-8775f5458372\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.483458 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4ea2eb46-81be-409a-9f6b-8775f5458372-logs\") pod \"glance-default-internal-api-0\" (UID: \"4ea2eb46-81be-409a-9f6b-8775f5458372\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.483458 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"4ea2eb46-81be-409a-9f6b-8775f5458372\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.483565 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/42db85a3-0c1c-416f-b1a0-857c750bb2c8-ovsdbserver-sb\") pod \"dnsmasq-dns-75c8ddd69c-8tf2v\" (UID: \"42db85a3-0c1c-416f-b1a0-857c750bb2c8\") " pod="openstack/dnsmasq-dns-75c8ddd69c-8tf2v" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.484343 4769 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"4ea2eb46-81be-409a-9f6b-8775f5458372\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-internal-api-0" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.484385 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4ea2eb46-81be-409a-9f6b-8775f5458372-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"4ea2eb46-81be-409a-9f6b-8775f5458372\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.493228 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ea2eb46-81be-409a-9f6b-8775f5458372-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"4ea2eb46-81be-409a-9f6b-8775f5458372\") " 
pod="openstack/glance-default-internal-api-0" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.503096 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4ea2eb46-81be-409a-9f6b-8775f5458372-scripts\") pod \"glance-default-internal-api-0\" (UID: \"4ea2eb46-81be-409a-9f6b-8775f5458372\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.507475 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4ea2eb46-81be-409a-9f6b-8775f5458372-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"4ea2eb46-81be-409a-9f6b-8775f5458372\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.514434 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ea2eb46-81be-409a-9f6b-8775f5458372-config-data\") pod \"glance-default-internal-api-0\" (UID: \"4ea2eb46-81be-409a-9f6b-8775f5458372\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.527005 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7gfn6\" (UniqueName: \"kubernetes.io/projected/4ea2eb46-81be-409a-9f6b-8775f5458372-kube-api-access-7gfn6\") pod \"glance-default-internal-api-0\" (UID: \"4ea2eb46-81be-409a-9f6b-8775f5458372\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.570686 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4ea324d5-69e1-4f0b-982f-15b0de3ec539","Type":"ContainerStarted","Data":"4b84b1aa69963dad2fd6727776c50ccd8e6eb53f4ef4e9a309bc9059cadc7147"} Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.594664 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a21fe715-e1e4-4f2f-84ca-e3843b3a4f06-combined-ca-bundle\") pod \"barbican-api-88f84b978-bzvgm\" (UID: \"a21fe715-e1e4-4f2f-84ca-e3843b3a4f06\") " pod="openstack/barbican-api-88f84b978-bzvgm" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.594789 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xzdlg\" (UniqueName: \"kubernetes.io/projected/a21fe715-e1e4-4f2f-84ca-e3843b3a4f06-kube-api-access-xzdlg\") pod \"barbican-api-88f84b978-bzvgm\" (UID: \"a21fe715-e1e4-4f2f-84ca-e3843b3a4f06\") " pod="openstack/barbican-api-88f84b978-bzvgm" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.594811 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a21fe715-e1e4-4f2f-84ca-e3843b3a4f06-config-data-custom\") pod \"barbican-api-88f84b978-bzvgm\" (UID: \"a21fe715-e1e4-4f2f-84ca-e3843b3a4f06\") " pod="openstack/barbican-api-88f84b978-bzvgm" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.597045 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7kv2q\" (UniqueName: \"kubernetes.io/projected/42db85a3-0c1c-416f-b1a0-857c750bb2c8-kube-api-access-7kv2q\") pod \"dnsmasq-dns-75c8ddd69c-8tf2v\" (UID: \"42db85a3-0c1c-416f-b1a0-857c750bb2c8\") " pod="openstack/dnsmasq-dns-75c8ddd69c-8tf2v" Nov 25 10:06:47 
crc kubenswrapper[4769]: I1125 10:06:47.597091 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a21fe715-e1e4-4f2f-84ca-e3843b3a4f06-logs\") pod \"barbican-api-88f84b978-bzvgm\" (UID: \"a21fe715-e1e4-4f2f-84ca-e3843b3a4f06\") " pod="openstack/barbican-api-88f84b978-bzvgm" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.597226 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/42db85a3-0c1c-416f-b1a0-857c750bb2c8-dns-swift-storage-0\") pod \"dnsmasq-dns-75c8ddd69c-8tf2v\" (UID: \"42db85a3-0c1c-416f-b1a0-857c750bb2c8\") " pod="openstack/dnsmasq-dns-75c8ddd69c-8tf2v" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.597334 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/42db85a3-0c1c-416f-b1a0-857c750bb2c8-dns-svc\") pod \"dnsmasq-dns-75c8ddd69c-8tf2v\" (UID: \"42db85a3-0c1c-416f-b1a0-857c750bb2c8\") " pod="openstack/dnsmasq-dns-75c8ddd69c-8tf2v" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.597415 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/42db85a3-0c1c-416f-b1a0-857c750bb2c8-ovsdbserver-sb\") pod \"dnsmasq-dns-75c8ddd69c-8tf2v\" (UID: \"42db85a3-0c1c-416f-b1a0-857c750bb2c8\") " pod="openstack/dnsmasq-dns-75c8ddd69c-8tf2v" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.597469 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/42db85a3-0c1c-416f-b1a0-857c750bb2c8-config\") pod \"dnsmasq-dns-75c8ddd69c-8tf2v\" (UID: \"42db85a3-0c1c-416f-b1a0-857c750bb2c8\") " pod="openstack/dnsmasq-dns-75c8ddd69c-8tf2v" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.597494 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/42db85a3-0c1c-416f-b1a0-857c750bb2c8-ovsdbserver-nb\") pod \"dnsmasq-dns-75c8ddd69c-8tf2v\" (UID: \"42db85a3-0c1c-416f-b1a0-857c750bb2c8\") " pod="openstack/dnsmasq-dns-75c8ddd69c-8tf2v" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.597535 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a21fe715-e1e4-4f2f-84ca-e3843b3a4f06-config-data\") pod \"barbican-api-88f84b978-bzvgm\" (UID: \"a21fe715-e1e4-4f2f-84ca-e3843b3a4f06\") " pod="openstack/barbican-api-88f84b978-bzvgm" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.600855 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/42db85a3-0c1c-416f-b1a0-857c750bb2c8-dns-swift-storage-0\") pod \"dnsmasq-dns-75c8ddd69c-8tf2v\" (UID: \"42db85a3-0c1c-416f-b1a0-857c750bb2c8\") " pod="openstack/dnsmasq-dns-75c8ddd69c-8tf2v" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.601618 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/42db85a3-0c1c-416f-b1a0-857c750bb2c8-ovsdbserver-sb\") pod \"dnsmasq-dns-75c8ddd69c-8tf2v\" (UID: \"42db85a3-0c1c-416f-b1a0-857c750bb2c8\") " pod="openstack/dnsmasq-dns-75c8ddd69c-8tf2v" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.601921 4769 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/42db85a3-0c1c-416f-b1a0-857c750bb2c8-dns-svc\") pod \"dnsmasq-dns-75c8ddd69c-8tf2v\" (UID: \"42db85a3-0c1c-416f-b1a0-857c750bb2c8\") " pod="openstack/dnsmasq-dns-75c8ddd69c-8tf2v" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.603471 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/42db85a3-0c1c-416f-b1a0-857c750bb2c8-ovsdbserver-nb\") pod \"dnsmasq-dns-75c8ddd69c-8tf2v\" (UID: \"42db85a3-0c1c-416f-b1a0-857c750bb2c8\") " pod="openstack/dnsmasq-dns-75c8ddd69c-8tf2v" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.610182 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/42db85a3-0c1c-416f-b1a0-857c750bb2c8-config\") pod \"dnsmasq-dns-75c8ddd69c-8tf2v\" (UID: \"42db85a3-0c1c-416f-b1a0-857c750bb2c8\") " pod="openstack/dnsmasq-dns-75c8ddd69c-8tf2v" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.615647 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"4ea2eb46-81be-409a-9f6b-8775f5458372\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.623254 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7kv2q\" (UniqueName: \"kubernetes.io/projected/42db85a3-0c1c-416f-b1a0-857c750bb2c8-kube-api-access-7kv2q\") pod \"dnsmasq-dns-75c8ddd69c-8tf2v\" (UID: \"42db85a3-0c1c-416f-b1a0-857c750bb2c8\") " pod="openstack/dnsmasq-dns-75c8ddd69c-8tf2v" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.704644 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a21fe715-e1e4-4f2f-84ca-e3843b3a4f06-logs\") pod \"barbican-api-88f84b978-bzvgm\" (UID: \"a21fe715-e1e4-4f2f-84ca-e3843b3a4f06\") " pod="openstack/barbican-api-88f84b978-bzvgm" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.705378 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a21fe715-e1e4-4f2f-84ca-e3843b3a4f06-logs\") pod \"barbican-api-88f84b978-bzvgm\" (UID: \"a21fe715-e1e4-4f2f-84ca-e3843b3a4f06\") " pod="openstack/barbican-api-88f84b978-bzvgm" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.708551 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a21fe715-e1e4-4f2f-84ca-e3843b3a4f06-config-data\") pod \"barbican-api-88f84b978-bzvgm\" (UID: \"a21fe715-e1e4-4f2f-84ca-e3843b3a4f06\") " pod="openstack/barbican-api-88f84b978-bzvgm" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.708618 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a21fe715-e1e4-4f2f-84ca-e3843b3a4f06-combined-ca-bundle\") pod \"barbican-api-88f84b978-bzvgm\" (UID: \"a21fe715-e1e4-4f2f-84ca-e3843b3a4f06\") " pod="openstack/barbican-api-88f84b978-bzvgm" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.708704 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xzdlg\" (UniqueName: 
\"kubernetes.io/projected/a21fe715-e1e4-4f2f-84ca-e3843b3a4f06-kube-api-access-xzdlg\") pod \"barbican-api-88f84b978-bzvgm\" (UID: \"a21fe715-e1e4-4f2f-84ca-e3843b3a4f06\") " pod="openstack/barbican-api-88f84b978-bzvgm" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.708731 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a21fe715-e1e4-4f2f-84ca-e3843b3a4f06-config-data-custom\") pod \"barbican-api-88f84b978-bzvgm\" (UID: \"a21fe715-e1e4-4f2f-84ca-e3843b3a4f06\") " pod="openstack/barbican-api-88f84b978-bzvgm" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.725125 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a21fe715-e1e4-4f2f-84ca-e3843b3a4f06-config-data-custom\") pod \"barbican-api-88f84b978-bzvgm\" (UID: \"a21fe715-e1e4-4f2f-84ca-e3843b3a4f06\") " pod="openstack/barbican-api-88f84b978-bzvgm" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.732161 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a21fe715-e1e4-4f2f-84ca-e3843b3a4f06-config-data\") pod \"barbican-api-88f84b978-bzvgm\" (UID: \"a21fe715-e1e4-4f2f-84ca-e3843b3a4f06\") " pod="openstack/barbican-api-88f84b978-bzvgm" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.735028 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xzdlg\" (UniqueName: \"kubernetes.io/projected/a21fe715-e1e4-4f2f-84ca-e3843b3a4f06-kube-api-access-xzdlg\") pod \"barbican-api-88f84b978-bzvgm\" (UID: \"a21fe715-e1e4-4f2f-84ca-e3843b3a4f06\") " pod="openstack/barbican-api-88f84b978-bzvgm" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.751888 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a21fe715-e1e4-4f2f-84ca-e3843b3a4f06-combined-ca-bundle\") pod \"barbican-api-88f84b978-bzvgm\" (UID: \"a21fe715-e1e4-4f2f-84ca-e3843b3a4f06\") " pod="openstack/barbican-api-88f84b978-bzvgm" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.835628 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-68856897d4-664jr" Nov 25 10:06:47 crc kubenswrapper[4769]: E1125 10:06:47.884354 4769 log.go:32] "CreateContainer in sandbox from runtime service failed" err=< Nov 25 10:06:47 crc kubenswrapper[4769]: rpc error: code = Unknown desc = container create failed: mount `/var/lib/kubelet/pods/938480bb-3a3b-4131-8fd0-ba63a9b2755a/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Nov 25 10:06:47 crc kubenswrapper[4769]: > podSandboxID="54f614b2f2cc20d1efa1baa29cba02a70b322f0c2dffeec15224d84d6e68d1ac" Nov 25 10:06:47 crc kubenswrapper[4769]: E1125 10:06:47.884597 4769 kuberuntime_manager.go:1274] "Unhandled Error" err=< Nov 25 10:06:47 crc kubenswrapper[4769]: container &Container{Name:dnsmasq-dns,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n574hdbh5ddhffh5c4h5dbh5bfh8bh75h584h97h557h69h55fhbch66dh94h5fdh9fh56fh584h5bbhb5h75hcbh667h77h577h5bhf9hf4hb4q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-swift-storage-0,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-swift-storage-0,SubPath:dns-swift-storage-0,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-nb,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/ovsdbserver-nb,SubPath:ovsdbserver-nb,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-sb,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/ovsdbserver-sb,SubPath:ovsdbserver-sb,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rmq88,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 
},Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-84b966f6c9-nzkfd_openstack(938480bb-3a3b-4131-8fd0-ba63a9b2755a): CreateContainerError: container create failed: mount `/var/lib/kubelet/pods/938480bb-3a3b-4131-8fd0-ba63a9b2755a/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Nov 25 10:06:47 crc kubenswrapper[4769]: > logger="UnhandledError" Nov 25 10:06:47 crc kubenswrapper[4769]: E1125 10:06:47.886040 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dnsmasq-dns\" with CreateContainerError: \"container create failed: mount `/var/lib/kubelet/pods/938480bb-3a3b-4131-8fd0-ba63a9b2755a/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory\\n\"" pod="openstack/dnsmasq-dns-84b966f6c9-nzkfd" podUID="938480bb-3a3b-4131-8fd0-ba63a9b2755a" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.934199 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-7ff7bc75d-dxtln" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.941300 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 10:06:47 crc kubenswrapper[4769]: I1125 10:06:47.973006 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-74bd96895f-7gn8z" Nov 25 10:06:48 crc kubenswrapper[4769]: I1125 10:06:48.009358 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75c8ddd69c-8tf2v" Nov 25 10:06:48 crc kubenswrapper[4769]: I1125 10:06:48.022205 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-88f84b978-bzvgm" Nov 25 10:06:48 crc kubenswrapper[4769]: I1125 10:06:48.235280 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-d4dbd5b55-g2prp"] Nov 25 10:06:48 crc kubenswrapper[4769]: I1125 10:06:48.279942 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3322c8d5-8fb0-4c40-8967-e20d2232a127" path="/var/lib/kubelet/pods/3322c8d5-8fb0-4c40-8967-e20d2232a127/volumes" Nov 25 10:06:48 crc kubenswrapper[4769]: W1125 10:06:48.320085 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod43d17ac5_45bb_488c_b28e_d076b6f279d8.slice/crio-2a2b3d6d6e1a6d26970237defd52402e5a02686aa92ffe18fa0ed46716dad9dd WatchSource:0}: Error finding container 2a2b3d6d6e1a6d26970237defd52402e5a02686aa92ffe18fa0ed46716dad9dd: Status 404 returned error can't find the container with id 2a2b3d6d6e1a6d26970237defd52402e5a02686aa92ffe18fa0ed46716dad9dd Nov 25 10:06:48 crc kubenswrapper[4769]: I1125 10:06:48.336610 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fc0affe7-9d6e-4a96-90de-f8206cc38627" path="/var/lib/kubelet/pods/fc0affe7-9d6e-4a96-90de-f8206cc38627/volumes" Nov 25 10:06:48 crc kubenswrapper[4769]: I1125 10:06:48.652289 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-lfvzs" event={"ID":"ed79e867-002b-4591-b757-0410b73a43ef","Type":"ContainerStarted","Data":"41ab74e3a94e982b2db87464ee5df37c27ce86c7c00fbe475355b5d6a6e8078e"} Nov 25 10:06:48 crc kubenswrapper[4769]: I1125 10:06:48.667079 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4ea324d5-69e1-4f0b-982f-15b0de3ec539","Type":"ContainerStarted","Data":"b29fbee263efb754baf1bfbecd2cbb37e73412708dcac6dfb72529016552877d"} Nov 25 10:06:48 crc kubenswrapper[4769]: I1125 10:06:48.673238 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-d4dbd5b55-g2prp" event={"ID":"43d17ac5-45bb-488c-b28e-d076b6f279d8","Type":"ContainerStarted","Data":"2a2b3d6d6e1a6d26970237defd52402e5a02686aa92ffe18fa0ed46716dad9dd"} Nov 25 10:06:48 crc kubenswrapper[4769]: I1125 10:06:48.712008 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-lfvzs" podStartSLOduration=6.904280349 podStartE2EDuration="53.711941346s" podCreationTimestamp="2025-11-25 10:05:55 +0000 UTC" firstStartedPulling="2025-11-25 10:05:58.886088193 +0000 UTC m=+1307.471060506" lastFinishedPulling="2025-11-25 10:06:45.69374918 +0000 UTC m=+1354.278721503" observedRunningTime="2025-11-25 10:06:48.693864164 +0000 UTC m=+1357.278836477" watchObservedRunningTime="2025-11-25 10:06:48.711941346 +0000 UTC m=+1357.296913659" Nov 25 10:06:48 crc kubenswrapper[4769]: I1125 10:06:48.767367 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=9.767339762 podStartE2EDuration="9.767339762s" podCreationTimestamp="2025-11-25 10:06:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:06:48.742248514 +0000 UTC m=+1357.327220827" watchObservedRunningTime="2025-11-25 10:06:48.767339762 +0000 UTC m=+1357.352312075" Nov 25 10:06:48 crc kubenswrapper[4769]: I1125 10:06:48.999356 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/placement-68856897d4-664jr"] Nov 25 10:06:49 crc kubenswrapper[4769]: I1125 10:06:49.725159 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-68856897d4-664jr" event={"ID":"e3e7e492-3f47-4688-b589-0c61adf29521","Type":"ContainerStarted","Data":"f8f03f40f961812789c91058e9dae4b9847e52a06cd5b4a7107876bcc0c5fc68"} Nov 25 10:06:49 crc kubenswrapper[4769]: I1125 10:06:49.729454 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.094095 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84b966f6c9-nzkfd" Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.127938 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-74bd96895f-7gn8z"] Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.128508 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rmq88\" (UniqueName: \"kubernetes.io/projected/938480bb-3a3b-4131-8fd0-ba63a9b2755a-kube-api-access-rmq88\") pod \"938480bb-3a3b-4131-8fd0-ba63a9b2755a\" (UID: \"938480bb-3a3b-4131-8fd0-ba63a9b2755a\") " Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.128625 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/938480bb-3a3b-4131-8fd0-ba63a9b2755a-ovsdbserver-sb\") pod \"938480bb-3a3b-4131-8fd0-ba63a9b2755a\" (UID: \"938480bb-3a3b-4131-8fd0-ba63a9b2755a\") " Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.128739 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/938480bb-3a3b-4131-8fd0-ba63a9b2755a-config\") pod \"938480bb-3a3b-4131-8fd0-ba63a9b2755a\" (UID: \"938480bb-3a3b-4131-8fd0-ba63a9b2755a\") " Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.128845 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/938480bb-3a3b-4131-8fd0-ba63a9b2755a-dns-svc\") pod \"938480bb-3a3b-4131-8fd0-ba63a9b2755a\" (UID: \"938480bb-3a3b-4131-8fd0-ba63a9b2755a\") " Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.129015 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/938480bb-3a3b-4131-8fd0-ba63a9b2755a-dns-swift-storage-0\") pod \"938480bb-3a3b-4131-8fd0-ba63a9b2755a\" (UID: \"938480bb-3a3b-4131-8fd0-ba63a9b2755a\") " Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.129115 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/938480bb-3a3b-4131-8fd0-ba63a9b2755a-ovsdbserver-nb\") pod \"938480bb-3a3b-4131-8fd0-ba63a9b2755a\" (UID: \"938480bb-3a3b-4131-8fd0-ba63a9b2755a\") " Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.149707 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/938480bb-3a3b-4131-8fd0-ba63a9b2755a-kube-api-access-rmq88" (OuterVolumeSpecName: "kube-api-access-rmq88") pod "938480bb-3a3b-4131-8fd0-ba63a9b2755a" (UID: "938480bb-3a3b-4131-8fd0-ba63a9b2755a"). InnerVolumeSpecName "kube-api-access-rmq88". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.191132 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-7ff7bc75d-dxtln"] Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.221364 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-75c8ddd69c-8tf2v"] Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.232082 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rmq88\" (UniqueName: \"kubernetes.io/projected/938480bb-3a3b-4131-8fd0-ba63a9b2755a-kube-api-access-rmq88\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.277443 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/938480bb-3a3b-4131-8fd0-ba63a9b2755a-config" (OuterVolumeSpecName: "config") pod "938480bb-3a3b-4131-8fd0-ba63a9b2755a" (UID: "938480bb-3a3b-4131-8fd0-ba63a9b2755a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.292113 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/938480bb-3a3b-4131-8fd0-ba63a9b2755a-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "938480bb-3a3b-4131-8fd0-ba63a9b2755a" (UID: "938480bb-3a3b-4131-8fd0-ba63a9b2755a"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.308359 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/938480bb-3a3b-4131-8fd0-ba63a9b2755a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "938480bb-3a3b-4131-8fd0-ba63a9b2755a" (UID: "938480bb-3a3b-4131-8fd0-ba63a9b2755a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.327877 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/938480bb-3a3b-4131-8fd0-ba63a9b2755a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "938480bb-3a3b-4131-8fd0-ba63a9b2755a" (UID: "938480bb-3a3b-4131-8fd0-ba63a9b2755a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.350476 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/938480bb-3a3b-4131-8fd0-ba63a9b2755a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "938480bb-3a3b-4131-8fd0-ba63a9b2755a" (UID: "938480bb-3a3b-4131-8fd0-ba63a9b2755a"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.350529 4769 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/938480bb-3a3b-4131-8fd0-ba63a9b2755a-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.350563 4769 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/938480bb-3a3b-4131-8fd0-ba63a9b2755a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.350575 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/938480bb-3a3b-4131-8fd0-ba63a9b2755a-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.350589 4769 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/938480bb-3a3b-4131-8fd0-ba63a9b2755a-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.400933 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-88f84b978-bzvgm"] Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.452898 4769 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/938480bb-3a3b-4131-8fd0-ba63a9b2755a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.643842 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.644683 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.756927 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"4ea2eb46-81be-409a-9f6b-8775f5458372","Type":"ContainerStarted","Data":"bf5c5d10ced4a4c692efcd4379f2eee3b2c972c373664dfba43854f9e4f33372"} Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.761185 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-d4dbd5b55-g2prp" event={"ID":"43d17ac5-45bb-488c-b28e-d076b6f279d8","Type":"ContainerStarted","Data":"d78540e00d76912d0e62798e10de3f1004f3c9a7fd28257b498ba3a05ac0c173"} Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.761283 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-d4dbd5b55-g2prp" Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.763202 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75c8ddd69c-8tf2v" event={"ID":"42db85a3-0c1c-416f-b1a0-857c750bb2c8","Type":"ContainerStarted","Data":"5e189cc41795bd16e3952feef0e19f8f033f2018110459328704e305b70708ce"} Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.773641 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-7ff7bc75d-dxtln" event={"ID":"e7bfc1de-a2ad-4c42-ab54-a4df40fdd797","Type":"ContainerStarted","Data":"dde38523a35804865ab67a4dcc60046fe394a63ffac98a4564d58c647b18d594"} Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.795590 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84b966f6c9-nzkfd" 
event={"ID":"938480bb-3a3b-4131-8fd0-ba63a9b2755a","Type":"ContainerDied","Data":"54f614b2f2cc20d1efa1baa29cba02a70b322f0c2dffeec15224d84d6e68d1ac"} Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.795667 4769 scope.go:117] "RemoveContainer" containerID="8380888e60f40e13b058e46ee97f87cdc42f4225d6d7c154562513af04d43536" Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.795880 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84b966f6c9-nzkfd" Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.803889 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-d4dbd5b55-g2prp" podStartSLOduration=4.803855073 podStartE2EDuration="4.803855073s" podCreationTimestamp="2025-11-25 10:06:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:06:50.779448833 +0000 UTC m=+1359.364421136" watchObservedRunningTime="2025-11-25 10:06:50.803855073 +0000 UTC m=+1359.388827376" Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.811097 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-74bd96895f-7gn8z" event={"ID":"1d5d3ba2-441b-4a94-974d-142811c1d2b4","Type":"ContainerStarted","Data":"77b57582c975a96d0ed9e84b81ddc69f1b5c4cf32efd75f0eaf15bf36a695095"} Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.813212 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.833263 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.836276 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-68856897d4-664jr" event={"ID":"e3e7e492-3f47-4688-b589-0c61adf29521","Type":"ContainerStarted","Data":"4750ff8d8ded527c3b5a15552c100a57c58e9866ad0f08eba299f0b8d94b2356"} Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.836349 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-68856897d4-664jr" event={"ID":"e3e7e492-3f47-4688-b589-0c61adf29521","Type":"ContainerStarted","Data":"a930c62dec55a366f8c43a3bcd8bf68c4ae88e6d8f02283552cb9ccea43da6e2"} Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.837824 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-68856897d4-664jr" Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.837866 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-68856897d4-664jr" Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.855211 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-88f84b978-bzvgm" event={"ID":"a21fe715-e1e4-4f2f-84ca-e3843b3a4f06","Type":"ContainerStarted","Data":"718f7b81cfdf6d55599f44a028ec66b96917d28c1a911f370cc561c618b6f4ab"} Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.855358 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.856281 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.901468 4769 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/barbican-api-56bfd676cd-c9dgn"] Nov 25 10:06:50 crc kubenswrapper[4769]: E1125 10:06:50.902098 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="938480bb-3a3b-4131-8fd0-ba63a9b2755a" containerName="init" Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.902113 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="938480bb-3a3b-4131-8fd0-ba63a9b2755a" containerName="init" Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.902390 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="938480bb-3a3b-4131-8fd0-ba63a9b2755a" containerName="init" Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.904195 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-56bfd676cd-c9dgn" Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.916140 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.916391 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.983420 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-56bfd676cd-c9dgn"] Nov 25 10:06:50 crc kubenswrapper[4769]: I1125 10:06:50.989402 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-68856897d4-664jr" podStartSLOduration=4.989378636 podStartE2EDuration="4.989378636s" podCreationTimestamp="2025-11-25 10:06:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:06:50.966324882 +0000 UTC m=+1359.551297205" watchObservedRunningTime="2025-11-25 10:06:50.989378636 +0000 UTC m=+1359.574350939" Nov 25 10:06:51 crc kubenswrapper[4769]: I1125 10:06:51.079841 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/28aedc7b-6594-4c9d-9b2e-ea66937919f2-config-data-custom\") pod \"barbican-api-56bfd676cd-c9dgn\" (UID: \"28aedc7b-6594-4c9d-9b2e-ea66937919f2\") " pod="openstack/barbican-api-56bfd676cd-c9dgn" Nov 25 10:06:51 crc kubenswrapper[4769]: I1125 10:06:51.080037 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/28aedc7b-6594-4c9d-9b2e-ea66937919f2-config-data\") pod \"barbican-api-56bfd676cd-c9dgn\" (UID: \"28aedc7b-6594-4c9d-9b2e-ea66937919f2\") " pod="openstack/barbican-api-56bfd676cd-c9dgn" Nov 25 10:06:51 crc kubenswrapper[4769]: I1125 10:06:51.080069 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/28aedc7b-6594-4c9d-9b2e-ea66937919f2-public-tls-certs\") pod \"barbican-api-56bfd676cd-c9dgn\" (UID: \"28aedc7b-6594-4c9d-9b2e-ea66937919f2\") " pod="openstack/barbican-api-56bfd676cd-c9dgn" Nov 25 10:06:51 crc kubenswrapper[4769]: I1125 10:06:51.080104 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28aedc7b-6594-4c9d-9b2e-ea66937919f2-combined-ca-bundle\") pod \"barbican-api-56bfd676cd-c9dgn\" (UID: \"28aedc7b-6594-4c9d-9b2e-ea66937919f2\") " pod="openstack/barbican-api-56bfd676cd-c9dgn" Nov 25 10:06:51 crc 
kubenswrapper[4769]: I1125 10:06:51.080169 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/28aedc7b-6594-4c9d-9b2e-ea66937919f2-logs\") pod \"barbican-api-56bfd676cd-c9dgn\" (UID: \"28aedc7b-6594-4c9d-9b2e-ea66937919f2\") " pod="openstack/barbican-api-56bfd676cd-c9dgn" Nov 25 10:06:51 crc kubenswrapper[4769]: I1125 10:06:51.080209 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kkrtq\" (UniqueName: \"kubernetes.io/projected/28aedc7b-6594-4c9d-9b2e-ea66937919f2-kube-api-access-kkrtq\") pod \"barbican-api-56bfd676cd-c9dgn\" (UID: \"28aedc7b-6594-4c9d-9b2e-ea66937919f2\") " pod="openstack/barbican-api-56bfd676cd-c9dgn" Nov 25 10:06:51 crc kubenswrapper[4769]: I1125 10:06:51.080238 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/28aedc7b-6594-4c9d-9b2e-ea66937919f2-internal-tls-certs\") pod \"barbican-api-56bfd676cd-c9dgn\" (UID: \"28aedc7b-6594-4c9d-9b2e-ea66937919f2\") " pod="openstack/barbican-api-56bfd676cd-c9dgn" Nov 25 10:06:51 crc kubenswrapper[4769]: I1125 10:06:51.106701 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-84b966f6c9-nzkfd"] Nov 25 10:06:51 crc kubenswrapper[4769]: I1125 10:06:51.133427 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-84b966f6c9-nzkfd"] Nov 25 10:06:51 crc kubenswrapper[4769]: I1125 10:06:51.189220 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/28aedc7b-6594-4c9d-9b2e-ea66937919f2-logs\") pod \"barbican-api-56bfd676cd-c9dgn\" (UID: \"28aedc7b-6594-4c9d-9b2e-ea66937919f2\") " pod="openstack/barbican-api-56bfd676cd-c9dgn" Nov 25 10:06:51 crc kubenswrapper[4769]: I1125 10:06:51.189330 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kkrtq\" (UniqueName: \"kubernetes.io/projected/28aedc7b-6594-4c9d-9b2e-ea66937919f2-kube-api-access-kkrtq\") pod \"barbican-api-56bfd676cd-c9dgn\" (UID: \"28aedc7b-6594-4c9d-9b2e-ea66937919f2\") " pod="openstack/barbican-api-56bfd676cd-c9dgn" Nov 25 10:06:51 crc kubenswrapper[4769]: I1125 10:06:51.189370 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/28aedc7b-6594-4c9d-9b2e-ea66937919f2-internal-tls-certs\") pod \"barbican-api-56bfd676cd-c9dgn\" (UID: \"28aedc7b-6594-4c9d-9b2e-ea66937919f2\") " pod="openstack/barbican-api-56bfd676cd-c9dgn" Nov 25 10:06:51 crc kubenswrapper[4769]: I1125 10:06:51.189527 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/28aedc7b-6594-4c9d-9b2e-ea66937919f2-config-data-custom\") pod \"barbican-api-56bfd676cd-c9dgn\" (UID: \"28aedc7b-6594-4c9d-9b2e-ea66937919f2\") " pod="openstack/barbican-api-56bfd676cd-c9dgn" Nov 25 10:06:51 crc kubenswrapper[4769]: I1125 10:06:51.189735 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/28aedc7b-6594-4c9d-9b2e-ea66937919f2-config-data\") pod \"barbican-api-56bfd676cd-c9dgn\" (UID: \"28aedc7b-6594-4c9d-9b2e-ea66937919f2\") " pod="openstack/barbican-api-56bfd676cd-c9dgn" Nov 25 10:06:51 crc kubenswrapper[4769]: I1125 10:06:51.189768 4769 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/28aedc7b-6594-4c9d-9b2e-ea66937919f2-public-tls-certs\") pod \"barbican-api-56bfd676cd-c9dgn\" (UID: \"28aedc7b-6594-4c9d-9b2e-ea66937919f2\") " pod="openstack/barbican-api-56bfd676cd-c9dgn" Nov 25 10:06:51 crc kubenswrapper[4769]: I1125 10:06:51.189797 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28aedc7b-6594-4c9d-9b2e-ea66937919f2-combined-ca-bundle\") pod \"barbican-api-56bfd676cd-c9dgn\" (UID: \"28aedc7b-6594-4c9d-9b2e-ea66937919f2\") " pod="openstack/barbican-api-56bfd676cd-c9dgn" Nov 25 10:06:51 crc kubenswrapper[4769]: I1125 10:06:51.198921 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/28aedc7b-6594-4c9d-9b2e-ea66937919f2-logs\") pod \"barbican-api-56bfd676cd-c9dgn\" (UID: \"28aedc7b-6594-4c9d-9b2e-ea66937919f2\") " pod="openstack/barbican-api-56bfd676cd-c9dgn" Nov 25 10:06:51 crc kubenswrapper[4769]: I1125 10:06:51.202286 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28aedc7b-6594-4c9d-9b2e-ea66937919f2-combined-ca-bundle\") pod \"barbican-api-56bfd676cd-c9dgn\" (UID: \"28aedc7b-6594-4c9d-9b2e-ea66937919f2\") " pod="openstack/barbican-api-56bfd676cd-c9dgn" Nov 25 10:06:51 crc kubenswrapper[4769]: I1125 10:06:51.218729 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/28aedc7b-6594-4c9d-9b2e-ea66937919f2-internal-tls-certs\") pod \"barbican-api-56bfd676cd-c9dgn\" (UID: \"28aedc7b-6594-4c9d-9b2e-ea66937919f2\") " pod="openstack/barbican-api-56bfd676cd-c9dgn" Nov 25 10:06:51 crc kubenswrapper[4769]: I1125 10:06:51.219414 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/28aedc7b-6594-4c9d-9b2e-ea66937919f2-config-data-custom\") pod \"barbican-api-56bfd676cd-c9dgn\" (UID: \"28aedc7b-6594-4c9d-9b2e-ea66937919f2\") " pod="openstack/barbican-api-56bfd676cd-c9dgn" Nov 25 10:06:51 crc kubenswrapper[4769]: I1125 10:06:51.233386 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kkrtq\" (UniqueName: \"kubernetes.io/projected/28aedc7b-6594-4c9d-9b2e-ea66937919f2-kube-api-access-kkrtq\") pod \"barbican-api-56bfd676cd-c9dgn\" (UID: \"28aedc7b-6594-4c9d-9b2e-ea66937919f2\") " pod="openstack/barbican-api-56bfd676cd-c9dgn" Nov 25 10:06:51 crc kubenswrapper[4769]: I1125 10:06:51.234047 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/28aedc7b-6594-4c9d-9b2e-ea66937919f2-config-data\") pod \"barbican-api-56bfd676cd-c9dgn\" (UID: \"28aedc7b-6594-4c9d-9b2e-ea66937919f2\") " pod="openstack/barbican-api-56bfd676cd-c9dgn" Nov 25 10:06:51 crc kubenswrapper[4769]: I1125 10:06:51.258832 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/28aedc7b-6594-4c9d-9b2e-ea66937919f2-public-tls-certs\") pod \"barbican-api-56bfd676cd-c9dgn\" (UID: \"28aedc7b-6594-4c9d-9b2e-ea66937919f2\") " pod="openstack/barbican-api-56bfd676cd-c9dgn" Nov 25 10:06:51 crc kubenswrapper[4769]: I1125 10:06:51.295174 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-56bfd676cd-c9dgn" Nov 25 10:06:51 crc kubenswrapper[4769]: I1125 10:06:51.905822 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-88f84b978-bzvgm" event={"ID":"a21fe715-e1e4-4f2f-84ca-e3843b3a4f06","Type":"ContainerStarted","Data":"ffbb0e32fb8457f11d550040a0ac39717e3c5160cd004ff8752ffb9f9f0247f3"} Nov 25 10:06:51 crc kubenswrapper[4769]: I1125 10:06:51.906615 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-88f84b978-bzvgm" event={"ID":"a21fe715-e1e4-4f2f-84ca-e3843b3a4f06","Type":"ContainerStarted","Data":"9eae5c3664531ffe21078f8ed4e4a78f2e26c0c12c49c4291bf57ee12e079532"} Nov 25 10:06:51 crc kubenswrapper[4769]: I1125 10:06:51.906636 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-88f84b978-bzvgm" Nov 25 10:06:51 crc kubenswrapper[4769]: I1125 10:06:51.906649 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-88f84b978-bzvgm" Nov 25 10:06:51 crc kubenswrapper[4769]: I1125 10:06:51.919696 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"4ea2eb46-81be-409a-9f6b-8775f5458372","Type":"ContainerStarted","Data":"5c0bff317d73f056f37cf0fcc331317bdf7c0cfb69ae4a456b42915a028ac8b0"} Nov 25 10:06:51 crc kubenswrapper[4769]: I1125 10:06:51.924057 4769 generic.go:334] "Generic (PLEG): container finished" podID="42db85a3-0c1c-416f-b1a0-857c750bb2c8" containerID="01c0fe592c10b6df89d8a513475f6241d356ab21fb56a81fc3cdff8c30f217aa" exitCode=0 Nov 25 10:06:51 crc kubenswrapper[4769]: I1125 10:06:51.925018 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75c8ddd69c-8tf2v" event={"ID":"42db85a3-0c1c-416f-b1a0-857c750bb2c8","Type":"ContainerDied","Data":"01c0fe592c10b6df89d8a513475f6241d356ab21fb56a81fc3cdff8c30f217aa"} Nov 25 10:06:51 crc kubenswrapper[4769]: I1125 10:06:51.941685 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-88f84b978-bzvgm" podStartSLOduration=5.941665588 podStartE2EDuration="5.941665588s" podCreationTimestamp="2025-11-25 10:06:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:06:51.932393701 +0000 UTC m=+1360.517366014" watchObservedRunningTime="2025-11-25 10:06:51.941665588 +0000 UTC m=+1360.526637901" Nov 25 10:06:52 crc kubenswrapper[4769]: I1125 10:06:52.062637 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-56bfd676cd-c9dgn"] Nov 25 10:06:52 crc kubenswrapper[4769]: W1125 10:06:52.135781 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod28aedc7b_6594_4c9d_9b2e_ea66937919f2.slice/crio-7c7be9b166da5315975c3999680dd7980895a369d67806387347d52faaf83304 WatchSource:0}: Error finding container 7c7be9b166da5315975c3999680dd7980895a369d67806387347d52faaf83304: Status 404 returned error can't find the container with id 7c7be9b166da5315975c3999680dd7980895a369d67806387347d52faaf83304 Nov 25 10:06:52 crc kubenswrapper[4769]: I1125 10:06:52.260034 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="938480bb-3a3b-4131-8fd0-ba63a9b2755a" path="/var/lib/kubelet/pods/938480bb-3a3b-4131-8fd0-ba63a9b2755a/volumes" Nov 25 10:06:52 crc kubenswrapper[4769]: I1125 10:06:52.294479 4769 patch_prober.go:28] 
interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:06:52 crc kubenswrapper[4769]: I1125 10:06:52.294520 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:06:52 crc kubenswrapper[4769]: I1125 10:06:52.294562 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" Nov 25 10:06:52 crc kubenswrapper[4769]: I1125 10:06:52.295440 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"94adcb75d0d9250faebaf531d11103a1b9f1da8a7a156454e1964c577d865b3a"} pod="openshift-machine-config-operator/machine-config-daemon-98mzt" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 10:06:52 crc kubenswrapper[4769]: I1125 10:06:52.295512 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" containerID="cri-o://94adcb75d0d9250faebaf531d11103a1b9f1da8a7a156454e1964c577d865b3a" gracePeriod=600 Nov 25 10:06:52 crc kubenswrapper[4769]: I1125 10:06:52.953618 4769 generic.go:334] "Generic (PLEG): container finished" podID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerID="94adcb75d0d9250faebaf531d11103a1b9f1da8a7a156454e1964c577d865b3a" exitCode=0 Nov 25 10:06:52 crc kubenswrapper[4769]: I1125 10:06:52.954515 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" event={"ID":"d58c71b5-5dc4-45c1-9b58-9740a35d2256","Type":"ContainerDied","Data":"94adcb75d0d9250faebaf531d11103a1b9f1da8a7a156454e1964c577d865b3a"} Nov 25 10:06:52 crc kubenswrapper[4769]: I1125 10:06:52.954574 4769 scope.go:117] "RemoveContainer" containerID="cb83aff5daeabf3c346d649af9f679bac8007d12e211a6552aed7e17452f752f" Nov 25 10:06:52 crc kubenswrapper[4769]: I1125 10:06:52.972175 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75c8ddd69c-8tf2v" event={"ID":"42db85a3-0c1c-416f-b1a0-857c750bb2c8","Type":"ContainerStarted","Data":"b9e0ae79fee228165fdacb7b309756b0ed9065ffdcb684887bbe6dd5d367eb50"} Nov 25 10:06:52 crc kubenswrapper[4769]: I1125 10:06:52.974383 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-75c8ddd69c-8tf2v" Nov 25 10:06:52 crc kubenswrapper[4769]: I1125 10:06:52.984430 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-56bfd676cd-c9dgn" event={"ID":"28aedc7b-6594-4c9d-9b2e-ea66937919f2","Type":"ContainerStarted","Data":"6268c5719b2d36757092489a514d3f0d79f3faba9058b3d5e403b0acc76716a1"} Nov 25 10:06:52 crc kubenswrapper[4769]: I1125 10:06:52.984473 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-56bfd676cd-c9dgn" 
event={"ID":"28aedc7b-6594-4c9d-9b2e-ea66937919f2","Type":"ContainerStarted","Data":"7c7be9b166da5315975c3999680dd7980895a369d67806387347d52faaf83304"} Nov 25 10:06:52 crc kubenswrapper[4769]: I1125 10:06:52.988750 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"4ea2eb46-81be-409a-9f6b-8775f5458372","Type":"ContainerStarted","Data":"324d6d2418fce58a565ec648cf71a995ff8b4854821799494437c7cf85e2042c"} Nov 25 10:06:53 crc kubenswrapper[4769]: I1125 10:06:53.011587 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-75c8ddd69c-8tf2v" podStartSLOduration=7.011571205 podStartE2EDuration="7.011571205s" podCreationTimestamp="2025-11-25 10:06:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:06:52.99149385 +0000 UTC m=+1361.576466163" watchObservedRunningTime="2025-11-25 10:06:53.011571205 +0000 UTC m=+1361.596543518" Nov 25 10:06:53 crc kubenswrapper[4769]: I1125 10:06:53.028829 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=7.028808924 podStartE2EDuration="7.028808924s" podCreationTimestamp="2025-11-25 10:06:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:06:53.02415268 +0000 UTC m=+1361.609124993" watchObservedRunningTime="2025-11-25 10:06:53.028808924 +0000 UTC m=+1361.613781237" Nov 25 10:06:54 crc kubenswrapper[4769]: I1125 10:06:54.018215 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" event={"ID":"d58c71b5-5dc4-45c1-9b58-9740a35d2256","Type":"ContainerStarted","Data":"19d7e4719048ead4c6811e0bf09eaeb04f5d9c0b0e2c91bc113c457c1276b7a7"} Nov 25 10:06:55 crc kubenswrapper[4769]: I1125 10:06:55.030814 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-56bfd676cd-c9dgn" event={"ID":"28aedc7b-6594-4c9d-9b2e-ea66937919f2","Type":"ContainerStarted","Data":"85bc70947f454addefb287033646a1a03846a0e26dde1291df166685e36a3b5b"} Nov 25 10:06:55 crc kubenswrapper[4769]: I1125 10:06:55.031697 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-56bfd676cd-c9dgn" Nov 25 10:06:55 crc kubenswrapper[4769]: I1125 10:06:55.040041 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-7ff7bc75d-dxtln" event={"ID":"e7bfc1de-a2ad-4c42-ab54-a4df40fdd797","Type":"ContainerStarted","Data":"2e10cb3ef05dd704b31de0aadc949543626f82a4f1792d850f471e65b3a9340a"} Nov 25 10:06:55 crc kubenswrapper[4769]: I1125 10:06:55.040084 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-7ff7bc75d-dxtln" event={"ID":"e7bfc1de-a2ad-4c42-ab54-a4df40fdd797","Type":"ContainerStarted","Data":"9686aebe06317073b8a19879edc09a053c43294a29cc2f4bb5c55c64cc48fb50"} Nov 25 10:06:55 crc kubenswrapper[4769]: I1125 10:06:55.041798 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-74bd96895f-7gn8z" event={"ID":"1d5d3ba2-441b-4a94-974d-142811c1d2b4","Type":"ContainerStarted","Data":"f805a85256ddbdf4a6b12fd07b0696f104b68070c436a9c6656cbabec9d9c1c6"} Nov 25 10:06:55 crc kubenswrapper[4769]: I1125 10:06:55.058383 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/barbican-api-56bfd676cd-c9dgn" podStartSLOduration=5.05836426 podStartE2EDuration="5.05836426s" podCreationTimestamp="2025-11-25 10:06:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:06:55.05011092 +0000 UTC m=+1363.635083233" watchObservedRunningTime="2025-11-25 10:06:55.05836426 +0000 UTC m=+1363.643336573" Nov 25 10:06:56 crc kubenswrapper[4769]: I1125 10:06:56.056512 4769 generic.go:334] "Generic (PLEG): container finished" podID="639faafa-26cf-4b2e-831b-bc95e327cb3b" containerID="6e87b8e8e24fee05ec911efe798d5d590b46bd2fd850b4c0f1cd1a9ff1457c3d" exitCode=0 Nov 25 10:06:56 crc kubenswrapper[4769]: I1125 10:06:56.056575 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-5tt9b" event={"ID":"639faafa-26cf-4b2e-831b-bc95e327cb3b","Type":"ContainerDied","Data":"6e87b8e8e24fee05ec911efe798d5d590b46bd2fd850b4c0f1cd1a9ff1457c3d"} Nov 25 10:06:56 crc kubenswrapper[4769]: I1125 10:06:56.061096 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-74bd96895f-7gn8z" event={"ID":"1d5d3ba2-441b-4a94-974d-142811c1d2b4","Type":"ContainerStarted","Data":"5117dceac50d90e76478714251e2f6bd9e09c5dca61a50346585e5bf86050fa5"} Nov 25 10:06:56 crc kubenswrapper[4769]: I1125 10:06:56.062841 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-56bfd676cd-c9dgn" Nov 25 10:06:56 crc kubenswrapper[4769]: I1125 10:06:56.133878 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-74bd96895f-7gn8z" podStartSLOduration=6.128089715 podStartE2EDuration="10.133843364s" podCreationTimestamp="2025-11-25 10:06:46 +0000 UTC" firstStartedPulling="2025-11-25 10:06:50.175315846 +0000 UTC m=+1358.760288159" lastFinishedPulling="2025-11-25 10:06:54.181069495 +0000 UTC m=+1362.766041808" observedRunningTime="2025-11-25 10:06:56.115114675 +0000 UTC m=+1364.700086998" watchObservedRunningTime="2025-11-25 10:06:56.133843364 +0000 UTC m=+1364.718815677" Nov 25 10:06:56 crc kubenswrapper[4769]: I1125 10:06:56.165225 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-7ff7bc75d-dxtln" podStartSLOduration=6.212693919 podStartE2EDuration="10.16520233s" podCreationTimestamp="2025-11-25 10:06:46 +0000 UTC" firstStartedPulling="2025-11-25 10:06:50.228653017 +0000 UTC m=+1358.813625330" lastFinishedPulling="2025-11-25 10:06:54.181161428 +0000 UTC m=+1362.766133741" observedRunningTime="2025-11-25 10:06:56.139567557 +0000 UTC m=+1364.724539870" watchObservedRunningTime="2025-11-25 10:06:56.16520233 +0000 UTC m=+1364.750174643" Nov 25 10:06:56 crc kubenswrapper[4769]: I1125 10:06:56.723712 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 25 10:06:56 crc kubenswrapper[4769]: I1125 10:06:56.733352 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 25 10:06:57 crc kubenswrapper[4769]: I1125 10:06:57.943233 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 25 10:06:57 crc kubenswrapper[4769]: I1125 10:06:57.943702 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 25 10:06:58 crc kubenswrapper[4769]: I1125 10:06:58.012157 
4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-75c8ddd69c-8tf2v" Nov 25 10:06:58 crc kubenswrapper[4769]: I1125 10:06:58.034623 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 25 10:06:58 crc kubenswrapper[4769]: I1125 10:06:58.084229 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-68dcc9cf6f-v9lcm"] Nov 25 10:06:58 crc kubenswrapper[4769]: I1125 10:06:58.084493 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-68dcc9cf6f-v9lcm" podUID="e442b552-3d81-4d12-8d2c-8ef6c8b67ffd" containerName="dnsmasq-dns" containerID="cri-o://ad05b9dd04cd38e185f707963ff959077116f0538529b73da8c957bacb064692" gracePeriod=10 Nov 25 10:06:58 crc kubenswrapper[4769]: I1125 10:06:58.112149 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 25 10:06:58 crc kubenswrapper[4769]: I1125 10:06:58.297585 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 25 10:06:59 crc kubenswrapper[4769]: I1125 10:06:59.128470 4769 generic.go:334] "Generic (PLEG): container finished" podID="e442b552-3d81-4d12-8d2c-8ef6c8b67ffd" containerID="ad05b9dd04cd38e185f707963ff959077116f0538529b73da8c957bacb064692" exitCode=0 Nov 25 10:06:59 crc kubenswrapper[4769]: I1125 10:06:59.128493 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68dcc9cf6f-v9lcm" event={"ID":"e442b552-3d81-4d12-8d2c-8ef6c8b67ffd","Type":"ContainerDied","Data":"ad05b9dd04cd38e185f707963ff959077116f0538529b73da8c957bacb064692"} Nov 25 10:06:59 crc kubenswrapper[4769]: I1125 10:06:59.129604 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 25 10:07:00 crc kubenswrapper[4769]: I1125 10:07:00.143882 4769 generic.go:334] "Generic (PLEG): container finished" podID="ed79e867-002b-4591-b757-0410b73a43ef" containerID="41ab74e3a94e982b2db87464ee5df37c27ce86c7c00fbe475355b5d6a6e8078e" exitCode=0 Nov 25 10:07:00 crc kubenswrapper[4769]: I1125 10:07:00.144015 4769 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 10:07:00 crc kubenswrapper[4769]: I1125 10:07:00.145453 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-lfvzs" event={"ID":"ed79e867-002b-4591-b757-0410b73a43ef","Type":"ContainerDied","Data":"41ab74e3a94e982b2db87464ee5df37c27ce86c7c00fbe475355b5d6a6e8078e"} Nov 25 10:07:00 crc kubenswrapper[4769]: I1125 10:07:00.366922 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-56bfd676cd-c9dgn" Nov 25 10:07:00 crc kubenswrapper[4769]: I1125 10:07:00.369858 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-88f84b978-bzvgm" Nov 25 10:07:00 crc kubenswrapper[4769]: I1125 10:07:00.386617 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-88f84b978-bzvgm" Nov 25 10:07:00 crc kubenswrapper[4769]: I1125 10:07:00.646923 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-56bfd676cd-c9dgn" podUID="28aedc7b-6594-4c9d-9b2e-ea66937919f2" containerName="barbican-api-log" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 10:07:01 crc kubenswrapper[4769]: I1125 
10:07:01.154576 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 25 10:07:01 crc kubenswrapper[4769]: I1125 10:07:01.159718 4769 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 10:07:01 crc kubenswrapper[4769]: I1125 10:07:01.161662 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 25 10:07:01 crc kubenswrapper[4769]: I1125 10:07:01.903649 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-68dcc9cf6f-v9lcm" podUID="e442b552-3d81-4d12-8d2c-8ef6c8b67ffd" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.182:5353: connect: connection refused" Nov 25 10:07:01 crc kubenswrapper[4769]: I1125 10:07:01.962023 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-75dfc45cc4-m9wmn" Nov 25 10:07:02 crc kubenswrapper[4769]: I1125 10:07:02.445225 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-5tt9b" Nov 25 10:07:02 crc kubenswrapper[4769]: I1125 10:07:02.448328 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-lfvzs" Nov 25 10:07:02 crc kubenswrapper[4769]: I1125 10:07:02.634290 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9rbnp\" (UniqueName: \"kubernetes.io/projected/ed79e867-002b-4591-b757-0410b73a43ef-kube-api-access-9rbnp\") pod \"ed79e867-002b-4591-b757-0410b73a43ef\" (UID: \"ed79e867-002b-4591-b757-0410b73a43ef\") " Nov 25 10:07:02 crc kubenswrapper[4769]: I1125 10:07:02.634824 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5m4h2\" (UniqueName: \"kubernetes.io/projected/639faafa-26cf-4b2e-831b-bc95e327cb3b-kube-api-access-5m4h2\") pod \"639faafa-26cf-4b2e-831b-bc95e327cb3b\" (UID: \"639faafa-26cf-4b2e-831b-bc95e327cb3b\") " Nov 25 10:07:02 crc kubenswrapper[4769]: I1125 10:07:02.634896 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed79e867-002b-4591-b757-0410b73a43ef-config-data\") pod \"ed79e867-002b-4591-b757-0410b73a43ef\" (UID: \"ed79e867-002b-4591-b757-0410b73a43ef\") " Nov 25 10:07:02 crc kubenswrapper[4769]: I1125 10:07:02.635064 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ed79e867-002b-4591-b757-0410b73a43ef-scripts\") pod \"ed79e867-002b-4591-b757-0410b73a43ef\" (UID: \"ed79e867-002b-4591-b757-0410b73a43ef\") " Nov 25 10:07:02 crc kubenswrapper[4769]: I1125 10:07:02.635125 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/639faafa-26cf-4b2e-831b-bc95e327cb3b-config-data\") pod \"639faafa-26cf-4b2e-831b-bc95e327cb3b\" (UID: \"639faafa-26cf-4b2e-831b-bc95e327cb3b\") " Nov 25 10:07:02 crc kubenswrapper[4769]: I1125 10:07:02.635178 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed79e867-002b-4591-b757-0410b73a43ef-combined-ca-bundle\") pod \"ed79e867-002b-4591-b757-0410b73a43ef\" (UID: \"ed79e867-002b-4591-b757-0410b73a43ef\") " Nov 25 10:07:02 crc kubenswrapper[4769]: I1125 10:07:02.635222 4769 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ed79e867-002b-4591-b757-0410b73a43ef-etc-machine-id\") pod \"ed79e867-002b-4591-b757-0410b73a43ef\" (UID: \"ed79e867-002b-4591-b757-0410b73a43ef\") " Nov 25 10:07:02 crc kubenswrapper[4769]: I1125 10:07:02.635304 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ed79e867-002b-4591-b757-0410b73a43ef-db-sync-config-data\") pod \"ed79e867-002b-4591-b757-0410b73a43ef\" (UID: \"ed79e867-002b-4591-b757-0410b73a43ef\") " Nov 25 10:07:02 crc kubenswrapper[4769]: I1125 10:07:02.635372 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/639faafa-26cf-4b2e-831b-bc95e327cb3b-combined-ca-bundle\") pod \"639faafa-26cf-4b2e-831b-bc95e327cb3b\" (UID: \"639faafa-26cf-4b2e-831b-bc95e327cb3b\") " Nov 25 10:07:02 crc kubenswrapper[4769]: I1125 10:07:02.635995 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ed79e867-002b-4591-b757-0410b73a43ef-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "ed79e867-002b-4591-b757-0410b73a43ef" (UID: "ed79e867-002b-4591-b757-0410b73a43ef"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 10:07:02 crc kubenswrapper[4769]: I1125 10:07:02.644168 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed79e867-002b-4591-b757-0410b73a43ef-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "ed79e867-002b-4591-b757-0410b73a43ef" (UID: "ed79e867-002b-4591-b757-0410b73a43ef"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:02 crc kubenswrapper[4769]: I1125 10:07:02.649016 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed79e867-002b-4591-b757-0410b73a43ef-scripts" (OuterVolumeSpecName: "scripts") pod "ed79e867-002b-4591-b757-0410b73a43ef" (UID: "ed79e867-002b-4591-b757-0410b73a43ef"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:02 crc kubenswrapper[4769]: I1125 10:07:02.659500 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/639faafa-26cf-4b2e-831b-bc95e327cb3b-kube-api-access-5m4h2" (OuterVolumeSpecName: "kube-api-access-5m4h2") pod "639faafa-26cf-4b2e-831b-bc95e327cb3b" (UID: "639faafa-26cf-4b2e-831b-bc95e327cb3b"). InnerVolumeSpecName "kube-api-access-5m4h2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:07:02 crc kubenswrapper[4769]: I1125 10:07:02.659634 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed79e867-002b-4591-b757-0410b73a43ef-kube-api-access-9rbnp" (OuterVolumeSpecName: "kube-api-access-9rbnp") pod "ed79e867-002b-4591-b757-0410b73a43ef" (UID: "ed79e867-002b-4591-b757-0410b73a43ef"). InnerVolumeSpecName "kube-api-access-9rbnp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:07:02 crc kubenswrapper[4769]: I1125 10:07:02.690879 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/639faafa-26cf-4b2e-831b-bc95e327cb3b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "639faafa-26cf-4b2e-831b-bc95e327cb3b" (UID: "639faafa-26cf-4b2e-831b-bc95e327cb3b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:02 crc kubenswrapper[4769]: I1125 10:07:02.727896 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-56bfd676cd-c9dgn" Nov 25 10:07:02 crc kubenswrapper[4769]: I1125 10:07:02.739448 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/639faafa-26cf-4b2e-831b-bc95e327cb3b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:02 crc kubenswrapper[4769]: I1125 10:07:02.739480 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9rbnp\" (UniqueName: \"kubernetes.io/projected/ed79e867-002b-4591-b757-0410b73a43ef-kube-api-access-9rbnp\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:02 crc kubenswrapper[4769]: I1125 10:07:02.739496 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5m4h2\" (UniqueName: \"kubernetes.io/projected/639faafa-26cf-4b2e-831b-bc95e327cb3b-kube-api-access-5m4h2\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:02 crc kubenswrapper[4769]: I1125 10:07:02.739510 4769 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ed79e867-002b-4591-b757-0410b73a43ef-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:02 crc kubenswrapper[4769]: I1125 10:07:02.739524 4769 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ed79e867-002b-4591-b757-0410b73a43ef-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:02 crc kubenswrapper[4769]: I1125 10:07:02.739535 4769 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ed79e867-002b-4591-b757-0410b73a43ef-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:02 crc kubenswrapper[4769]: I1125 10:07:02.794091 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed79e867-002b-4591-b757-0410b73a43ef-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ed79e867-002b-4591-b757-0410b73a43ef" (UID: "ed79e867-002b-4591-b757-0410b73a43ef"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:02 crc kubenswrapper[4769]: I1125 10:07:02.842327 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed79e867-002b-4591-b757-0410b73a43ef-config-data" (OuterVolumeSpecName: "config-data") pod "ed79e867-002b-4591-b757-0410b73a43ef" (UID: "ed79e867-002b-4591-b757-0410b73a43ef"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:02 crc kubenswrapper[4769]: I1125 10:07:02.845665 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed79e867-002b-4591-b757-0410b73a43ef-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:02 crc kubenswrapper[4769]: I1125 10:07:02.860237 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-88f84b978-bzvgm"] Nov 25 10:07:02 crc kubenswrapper[4769]: I1125 10:07:02.860484 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-88f84b978-bzvgm" podUID="a21fe715-e1e4-4f2f-84ca-e3843b3a4f06" containerName="barbican-api-log" containerID="cri-o://9eae5c3664531ffe21078f8ed4e4a78f2e26c0c12c49c4291bf57ee12e079532" gracePeriod=30 Nov 25 10:07:02 crc kubenswrapper[4769]: I1125 10:07:02.860654 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-88f84b978-bzvgm" podUID="a21fe715-e1e4-4f2f-84ca-e3843b3a4f06" containerName="barbican-api" containerID="cri-o://ffbb0e32fb8457f11d550040a0ac39717e3c5160cd004ff8752ffb9f9f0247f3" gracePeriod=30 Nov 25 10:07:02 crc kubenswrapper[4769]: I1125 10:07:02.928431 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/639faafa-26cf-4b2e-831b-bc95e327cb3b-config-data" (OuterVolumeSpecName: "config-data") pod "639faafa-26cf-4b2e-831b-bc95e327cb3b" (UID: "639faafa-26cf-4b2e-831b-bc95e327cb3b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:02 crc kubenswrapper[4769]: I1125 10:07:02.951902 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/639faafa-26cf-4b2e-831b-bc95e327cb3b-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:02 crc kubenswrapper[4769]: I1125 10:07:02.952275 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed79e867-002b-4591-b757-0410b73a43ef-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:03 crc kubenswrapper[4769]: I1125 10:07:03.232173 4769 generic.go:334] "Generic (PLEG): container finished" podID="a21fe715-e1e4-4f2f-84ca-e3843b3a4f06" containerID="9eae5c3664531ffe21078f8ed4e4a78f2e26c0c12c49c4291bf57ee12e079532" exitCode=143 Nov 25 10:07:03 crc kubenswrapper[4769]: I1125 10:07:03.232278 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-88f84b978-bzvgm" event={"ID":"a21fe715-e1e4-4f2f-84ca-e3843b3a4f06","Type":"ContainerDied","Data":"9eae5c3664531ffe21078f8ed4e4a78f2e26c0c12c49c4291bf57ee12e079532"} Nov 25 10:07:03 crc kubenswrapper[4769]: I1125 10:07:03.244407 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-sync-5tt9b" Nov 25 10:07:03 crc kubenswrapper[4769]: I1125 10:07:03.244653 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-5tt9b" event={"ID":"639faafa-26cf-4b2e-831b-bc95e327cb3b","Type":"ContainerDied","Data":"15a3e01d7e3f65c689cf62c1ca58e68203d091835e45762906e055c673cbfd4d"} Nov 25 10:07:03 crc kubenswrapper[4769]: I1125 10:07:03.244785 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="15a3e01d7e3f65c689cf62c1ca58e68203d091835e45762906e055c673cbfd4d" Nov 25 10:07:03 crc kubenswrapper[4769]: I1125 10:07:03.267291 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-lfvzs" event={"ID":"ed79e867-002b-4591-b757-0410b73a43ef","Type":"ContainerDied","Data":"1b299edccb75078078891e3e6f3e826b2fc970dff0fe3338cd40cf06669505bb"} Nov 25 10:07:03 crc kubenswrapper[4769]: I1125 10:07:03.267340 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1b299edccb75078078891e3e6f3e826b2fc970dff0fe3338cd40cf06669505bb" Nov 25 10:07:03 crc kubenswrapper[4769]: I1125 10:07:03.267442 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-lfvzs" Nov 25 10:07:03 crc kubenswrapper[4769]: I1125 10:07:03.867371 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 10:07:03 crc kubenswrapper[4769]: E1125 10:07:03.867983 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed79e867-002b-4591-b757-0410b73a43ef" containerName="cinder-db-sync" Nov 25 10:07:03 crc kubenswrapper[4769]: I1125 10:07:03.868489 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed79e867-002b-4591-b757-0410b73a43ef" containerName="cinder-db-sync" Nov 25 10:07:03 crc kubenswrapper[4769]: E1125 10:07:03.868512 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="639faafa-26cf-4b2e-831b-bc95e327cb3b" containerName="heat-db-sync" Nov 25 10:07:03 crc kubenswrapper[4769]: I1125 10:07:03.868518 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="639faafa-26cf-4b2e-831b-bc95e327cb3b" containerName="heat-db-sync" Nov 25 10:07:03 crc kubenswrapper[4769]: I1125 10:07:03.868773 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed79e867-002b-4591-b757-0410b73a43ef" containerName="cinder-db-sync" Nov 25 10:07:03 crc kubenswrapper[4769]: I1125 10:07:03.868785 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="639faafa-26cf-4b2e-831b-bc95e327cb3b" containerName="heat-db-sync" Nov 25 10:07:03 crc kubenswrapper[4769]: I1125 10:07:03.870520 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 10:07:03 crc kubenswrapper[4769]: I1125 10:07:03.876691 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-66n2x" Nov 25 10:07:03 crc kubenswrapper[4769]: I1125 10:07:03.876938 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 25 10:07:03 crc kubenswrapper[4769]: I1125 10:07:03.877148 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 25 10:07:03 crc kubenswrapper[4769]: I1125 10:07:03.878171 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 25 10:07:03 crc kubenswrapper[4769]: I1125 10:07:03.882228 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c9253d8a-ffc1-4c6e-9df4-452853762e84-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"c9253d8a-ffc1-4c6e-9df4-452853762e84\") " pod="openstack/cinder-scheduler-0" Nov 25 10:07:03 crc kubenswrapper[4769]: I1125 10:07:03.882282 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9253d8a-ffc1-4c6e-9df4-452853762e84-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"c9253d8a-ffc1-4c6e-9df4-452853762e84\") " pod="openstack/cinder-scheduler-0" Nov 25 10:07:03 crc kubenswrapper[4769]: I1125 10:07:03.882448 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9253d8a-ffc1-4c6e-9df4-452853762e84-config-data\") pod \"cinder-scheduler-0\" (UID: \"c9253d8a-ffc1-4c6e-9df4-452853762e84\") " pod="openstack/cinder-scheduler-0" Nov 25 10:07:03 crc kubenswrapper[4769]: I1125 10:07:03.882532 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c9253d8a-ffc1-4c6e-9df4-452853762e84-scripts\") pod \"cinder-scheduler-0\" (UID: \"c9253d8a-ffc1-4c6e-9df4-452853762e84\") " pod="openstack/cinder-scheduler-0" Nov 25 10:07:03 crc kubenswrapper[4769]: I1125 10:07:03.882577 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-26v5b\" (UniqueName: \"kubernetes.io/projected/c9253d8a-ffc1-4c6e-9df4-452853762e84-kube-api-access-26v5b\") pod \"cinder-scheduler-0\" (UID: \"c9253d8a-ffc1-4c6e-9df4-452853762e84\") " pod="openstack/cinder-scheduler-0" Nov 25 10:07:03 crc kubenswrapper[4769]: I1125 10:07:03.882603 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c9253d8a-ffc1-4c6e-9df4-452853762e84-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"c9253d8a-ffc1-4c6e-9df4-452853762e84\") " pod="openstack/cinder-scheduler-0" Nov 25 10:07:03 crc kubenswrapper[4769]: I1125 10:07:03.932729 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 10:07:03 crc kubenswrapper[4769]: I1125 10:07:03.996670 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-26v5b\" (UniqueName: \"kubernetes.io/projected/c9253d8a-ffc1-4c6e-9df4-452853762e84-kube-api-access-26v5b\") pod \"cinder-scheduler-0\" (UID: \"c9253d8a-ffc1-4c6e-9df4-452853762e84\") 
" pod="openstack/cinder-scheduler-0" Nov 25 10:07:03 crc kubenswrapper[4769]: I1125 10:07:03.996735 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c9253d8a-ffc1-4c6e-9df4-452853762e84-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"c9253d8a-ffc1-4c6e-9df4-452853762e84\") " pod="openstack/cinder-scheduler-0" Nov 25 10:07:03 crc kubenswrapper[4769]: I1125 10:07:03.996798 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c9253d8a-ffc1-4c6e-9df4-452853762e84-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"c9253d8a-ffc1-4c6e-9df4-452853762e84\") " pod="openstack/cinder-scheduler-0" Nov 25 10:07:03 crc kubenswrapper[4769]: I1125 10:07:03.996820 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9253d8a-ffc1-4c6e-9df4-452853762e84-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"c9253d8a-ffc1-4c6e-9df4-452853762e84\") " pod="openstack/cinder-scheduler-0" Nov 25 10:07:03 crc kubenswrapper[4769]: I1125 10:07:03.996983 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c9253d8a-ffc1-4c6e-9df4-452853762e84-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"c9253d8a-ffc1-4c6e-9df4-452853762e84\") " pod="openstack/cinder-scheduler-0" Nov 25 10:07:03 crc kubenswrapper[4769]: I1125 10:07:03.997148 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9253d8a-ffc1-4c6e-9df4-452853762e84-config-data\") pod \"cinder-scheduler-0\" (UID: \"c9253d8a-ffc1-4c6e-9df4-452853762e84\") " pod="openstack/cinder-scheduler-0" Nov 25 10:07:03 crc kubenswrapper[4769]: I1125 10:07:03.997211 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c9253d8a-ffc1-4c6e-9df4-452853762e84-scripts\") pod \"cinder-scheduler-0\" (UID: \"c9253d8a-ffc1-4c6e-9df4-452853762e84\") " pod="openstack/cinder-scheduler-0" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.039324 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c9253d8a-ffc1-4c6e-9df4-452853762e84-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"c9253d8a-ffc1-4c6e-9df4-452853762e84\") " pod="openstack/cinder-scheduler-0" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.039636 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9253d8a-ffc1-4c6e-9df4-452853762e84-config-data\") pod \"cinder-scheduler-0\" (UID: \"c9253d8a-ffc1-4c6e-9df4-452853762e84\") " pod="openstack/cinder-scheduler-0" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.039687 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9253d8a-ffc1-4c6e-9df4-452853762e84-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"c9253d8a-ffc1-4c6e-9df4-452853762e84\") " pod="openstack/cinder-scheduler-0" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.040009 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-26v5b\" (UniqueName: 
\"kubernetes.io/projected/c9253d8a-ffc1-4c6e-9df4-452853762e84-kube-api-access-26v5b\") pod \"cinder-scheduler-0\" (UID: \"c9253d8a-ffc1-4c6e-9df4-452853762e84\") " pod="openstack/cinder-scheduler-0" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.059381 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c9253d8a-ffc1-4c6e-9df4-452853762e84-scripts\") pod \"cinder-scheduler-0\" (UID: \"c9253d8a-ffc1-4c6e-9df4-452853762e84\") " pod="openstack/cinder-scheduler-0" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.094655 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-7zb5q"] Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.097421 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5784cf869f-7zb5q" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.118678 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-7zb5q"] Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.214265 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d127e733-e066-46ef-994b-62244f203f34-ovsdbserver-sb\") pod \"dnsmasq-dns-5784cf869f-7zb5q\" (UID: \"d127e733-e066-46ef-994b-62244f203f34\") " pod="openstack/dnsmasq-dns-5784cf869f-7zb5q" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.214459 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d127e733-e066-46ef-994b-62244f203f34-ovsdbserver-nb\") pod \"dnsmasq-dns-5784cf869f-7zb5q\" (UID: \"d127e733-e066-46ef-994b-62244f203f34\") " pod="openstack/dnsmasq-dns-5784cf869f-7zb5q" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.214706 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d127e733-e066-46ef-994b-62244f203f34-dns-swift-storage-0\") pod \"dnsmasq-dns-5784cf869f-7zb5q\" (UID: \"d127e733-e066-46ef-994b-62244f203f34\") " pod="openstack/dnsmasq-dns-5784cf869f-7zb5q" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.214810 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nch6d\" (UniqueName: \"kubernetes.io/projected/d127e733-e066-46ef-994b-62244f203f34-kube-api-access-nch6d\") pod \"dnsmasq-dns-5784cf869f-7zb5q\" (UID: \"d127e733-e066-46ef-994b-62244f203f34\") " pod="openstack/dnsmasq-dns-5784cf869f-7zb5q" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.215003 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d127e733-e066-46ef-994b-62244f203f34-dns-svc\") pod \"dnsmasq-dns-5784cf869f-7zb5q\" (UID: \"d127e733-e066-46ef-994b-62244f203f34\") " pod="openstack/dnsmasq-dns-5784cf869f-7zb5q" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.215062 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d127e733-e066-46ef-994b-62244f203f34-config\") pod \"dnsmasq-dns-5784cf869f-7zb5q\" (UID: \"d127e733-e066-46ef-994b-62244f203f34\") " pod="openstack/dnsmasq-dns-5784cf869f-7zb5q" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 
10:07:04.224011 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.240520 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68dcc9cf6f-v9lcm" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.322080 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d127e733-e066-46ef-994b-62244f203f34-dns-svc\") pod \"dnsmasq-dns-5784cf869f-7zb5q\" (UID: \"d127e733-e066-46ef-994b-62244f203f34\") " pod="openstack/dnsmasq-dns-5784cf869f-7zb5q" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.322150 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d127e733-e066-46ef-994b-62244f203f34-config\") pod \"dnsmasq-dns-5784cf869f-7zb5q\" (UID: \"d127e733-e066-46ef-994b-62244f203f34\") " pod="openstack/dnsmasq-dns-5784cf869f-7zb5q" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.322192 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d127e733-e066-46ef-994b-62244f203f34-ovsdbserver-sb\") pod \"dnsmasq-dns-5784cf869f-7zb5q\" (UID: \"d127e733-e066-46ef-994b-62244f203f34\") " pod="openstack/dnsmasq-dns-5784cf869f-7zb5q" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.322259 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d127e733-e066-46ef-994b-62244f203f34-ovsdbserver-nb\") pod \"dnsmasq-dns-5784cf869f-7zb5q\" (UID: \"d127e733-e066-46ef-994b-62244f203f34\") " pod="openstack/dnsmasq-dns-5784cf869f-7zb5q" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.322353 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d127e733-e066-46ef-994b-62244f203f34-dns-swift-storage-0\") pod \"dnsmasq-dns-5784cf869f-7zb5q\" (UID: \"d127e733-e066-46ef-994b-62244f203f34\") " pod="openstack/dnsmasq-dns-5784cf869f-7zb5q" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.322401 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nch6d\" (UniqueName: \"kubernetes.io/projected/d127e733-e066-46ef-994b-62244f203f34-kube-api-access-nch6d\") pod \"dnsmasq-dns-5784cf869f-7zb5q\" (UID: \"d127e733-e066-46ef-994b-62244f203f34\") " pod="openstack/dnsmasq-dns-5784cf869f-7zb5q" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.324791 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d127e733-e066-46ef-994b-62244f203f34-ovsdbserver-sb\") pod \"dnsmasq-dns-5784cf869f-7zb5q\" (UID: \"d127e733-e066-46ef-994b-62244f203f34\") " pod="openstack/dnsmasq-dns-5784cf869f-7zb5q" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.325535 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d127e733-e066-46ef-994b-62244f203f34-config\") pod \"dnsmasq-dns-5784cf869f-7zb5q\" (UID: \"d127e733-e066-46ef-994b-62244f203f34\") " pod="openstack/dnsmasq-dns-5784cf869f-7zb5q" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.326225 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" 
(UniqueName: \"kubernetes.io/configmap/d127e733-e066-46ef-994b-62244f203f34-ovsdbserver-nb\") pod \"dnsmasq-dns-5784cf869f-7zb5q\" (UID: \"d127e733-e066-46ef-994b-62244f203f34\") " pod="openstack/dnsmasq-dns-5784cf869f-7zb5q" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.326777 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d127e733-e066-46ef-994b-62244f203f34-dns-swift-storage-0\") pod \"dnsmasq-dns-5784cf869f-7zb5q\" (UID: \"d127e733-e066-46ef-994b-62244f203f34\") " pod="openstack/dnsmasq-dns-5784cf869f-7zb5q" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.327720 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d127e733-e066-46ef-994b-62244f203f34-dns-svc\") pod \"dnsmasq-dns-5784cf869f-7zb5q\" (UID: \"d127e733-e066-46ef-994b-62244f203f34\") " pod="openstack/dnsmasq-dns-5784cf869f-7zb5q" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.384350 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68dcc9cf6f-v9lcm" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.384614 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nch6d\" (UniqueName: \"kubernetes.io/projected/d127e733-e066-46ef-994b-62244f203f34-kube-api-access-nch6d\") pod \"dnsmasq-dns-5784cf869f-7zb5q\" (UID: \"d127e733-e066-46ef-994b-62244f203f34\") " pod="openstack/dnsmasq-dns-5784cf869f-7zb5q" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.429030 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e442b552-3d81-4d12-8d2c-8ef6c8b67ffd-dns-svc\") pod \"e442b552-3d81-4d12-8d2c-8ef6c8b67ffd\" (UID: \"e442b552-3d81-4d12-8d2c-8ef6c8b67ffd\") " Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.429112 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e442b552-3d81-4d12-8d2c-8ef6c8b67ffd-ovsdbserver-sb\") pod \"e442b552-3d81-4d12-8d2c-8ef6c8b67ffd\" (UID: \"e442b552-3d81-4d12-8d2c-8ef6c8b67ffd\") " Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.429263 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r4lst\" (UniqueName: \"kubernetes.io/projected/e442b552-3d81-4d12-8d2c-8ef6c8b67ffd-kube-api-access-r4lst\") pod \"e442b552-3d81-4d12-8d2c-8ef6c8b67ffd\" (UID: \"e442b552-3d81-4d12-8d2c-8ef6c8b67ffd\") " Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.429388 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e442b552-3d81-4d12-8d2c-8ef6c8b67ffd-ovsdbserver-nb\") pod \"e442b552-3d81-4d12-8d2c-8ef6c8b67ffd\" (UID: \"e442b552-3d81-4d12-8d2c-8ef6c8b67ffd\") " Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.429466 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e442b552-3d81-4d12-8d2c-8ef6c8b67ffd-config\") pod \"e442b552-3d81-4d12-8d2c-8ef6c8b67ffd\" (UID: \"e442b552-3d81-4d12-8d2c-8ef6c8b67ffd\") " Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.459422 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e442b552-3d81-4d12-8d2c-8ef6c8b67ffd-kube-api-access-r4lst" 
(OuterVolumeSpecName: "kube-api-access-r4lst") pod "e442b552-3d81-4d12-8d2c-8ef6c8b67ffd" (UID: "e442b552-3d81-4d12-8d2c-8ef6c8b67ffd"). InnerVolumeSpecName "kube-api-access-r4lst". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.545696 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5784cf869f-7zb5q" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.545992 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r4lst\" (UniqueName: \"kubernetes.io/projected/e442b552-3d81-4d12-8d2c-8ef6c8b67ffd-kube-api-access-r4lst\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.551400 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e442b552-3d81-4d12-8d2c-8ef6c8b67ffd-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e442b552-3d81-4d12-8d2c-8ef6c8b67ffd" (UID: "e442b552-3d81-4d12-8d2c-8ef6c8b67ffd"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.564684 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68dcc9cf6f-v9lcm" event={"ID":"e442b552-3d81-4d12-8d2c-8ef6c8b67ffd","Type":"ContainerDied","Data":"1eab4b9f74ff21467860ddd29f0b11792f8c51d96c5fbf0dffb4ba15a746e05a"} Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.564741 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.572233 4769 scope.go:117] "RemoveContainer" containerID="ad05b9dd04cd38e185f707963ff959077116f0538529b73da8c957bacb064692" Nov 25 10:07:04 crc kubenswrapper[4769]: E1125 10:07:04.577368 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e442b552-3d81-4d12-8d2c-8ef6c8b67ffd" containerName="dnsmasq-dns" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.577413 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="e442b552-3d81-4d12-8d2c-8ef6c8b67ffd" containerName="dnsmasq-dns" Nov 25 10:07:04 crc kubenswrapper[4769]: E1125 10:07:04.577460 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e442b552-3d81-4d12-8d2c-8ef6c8b67ffd" containerName="init" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.577468 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="e442b552-3d81-4d12-8d2c-8ef6c8b67ffd" containerName="init" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.578856 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="e442b552-3d81-4d12-8d2c-8ef6c8b67ffd" containerName="dnsmasq-dns" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.581934 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.582067 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.589135 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.631937 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e442b552-3d81-4d12-8d2c-8ef6c8b67ffd-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e442b552-3d81-4d12-8d2c-8ef6c8b67ffd" (UID: "e442b552-3d81-4d12-8d2c-8ef6c8b67ffd"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.656007 4769 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e442b552-3d81-4d12-8d2c-8ef6c8b67ffd-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.656044 4769 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e442b552-3d81-4d12-8d2c-8ef6c8b67ffd-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.668245 4769 scope.go:117] "RemoveContainer" containerID="8247f88a653dcdf8c849249384a7e5a03bdd93533e5563d5f1052ebb9f522085" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.758600 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3769c5a-3c30-4926-a2d5-8e1ae4e5e910-config-data\") pod \"cinder-api-0\" (UID: \"c3769c5a-3c30-4926-a2d5-8e1ae4e5e910\") " pod="openstack/cinder-api-0" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.758698 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3769c5a-3c30-4926-a2d5-8e1ae4e5e910-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"c3769c5a-3c30-4926-a2d5-8e1ae4e5e910\") " pod="openstack/cinder-api-0" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.759355 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c3769c5a-3c30-4926-a2d5-8e1ae4e5e910-config-data-custom\") pod \"cinder-api-0\" (UID: \"c3769c5a-3c30-4926-a2d5-8e1ae4e5e910\") " pod="openstack/cinder-api-0" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.759957 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c3769c5a-3c30-4926-a2d5-8e1ae4e5e910-scripts\") pod \"cinder-api-0\" (UID: \"c3769c5a-3c30-4926-a2d5-8e1ae4e5e910\") " pod="openstack/cinder-api-0" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.760015 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c3769c5a-3c30-4926-a2d5-8e1ae4e5e910-logs\") pod \"cinder-api-0\" (UID: \"c3769c5a-3c30-4926-a2d5-8e1ae4e5e910\") " pod="openstack/cinder-api-0" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.760039 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z2tlv\" (UniqueName: \"kubernetes.io/projected/c3769c5a-3c30-4926-a2d5-8e1ae4e5e910-kube-api-access-z2tlv\") pod \"cinder-api-0\" (UID: \"c3769c5a-3c30-4926-a2d5-8e1ae4e5e910\") " 
pod="openstack/cinder-api-0" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.760355 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c3769c5a-3c30-4926-a2d5-8e1ae4e5e910-etc-machine-id\") pod \"cinder-api-0\" (UID: \"c3769c5a-3c30-4926-a2d5-8e1ae4e5e910\") " pod="openstack/cinder-api-0" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.769493 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e442b552-3d81-4d12-8d2c-8ef6c8b67ffd-config" (OuterVolumeSpecName: "config") pod "e442b552-3d81-4d12-8d2c-8ef6c8b67ffd" (UID: "e442b552-3d81-4d12-8d2c-8ef6c8b67ffd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.779078 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e442b552-3d81-4d12-8d2c-8ef6c8b67ffd-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e442b552-3d81-4d12-8d2c-8ef6c8b67ffd" (UID: "e442b552-3d81-4d12-8d2c-8ef6c8b67ffd"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:07:04 crc kubenswrapper[4769]: E1125 10:07:04.819766 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ceilometer-0" podUID="3373fd6b-9a0f-4268-8476-382740118f35" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.863750 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c3769c5a-3c30-4926-a2d5-8e1ae4e5e910-config-data-custom\") pod \"cinder-api-0\" (UID: \"c3769c5a-3c30-4926-a2d5-8e1ae4e5e910\") " pod="openstack/cinder-api-0" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.864606 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c3769c5a-3c30-4926-a2d5-8e1ae4e5e910-scripts\") pod \"cinder-api-0\" (UID: \"c3769c5a-3c30-4926-a2d5-8e1ae4e5e910\") " pod="openstack/cinder-api-0" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.865260 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c3769c5a-3c30-4926-a2d5-8e1ae4e5e910-logs\") pod \"cinder-api-0\" (UID: \"c3769c5a-3c30-4926-a2d5-8e1ae4e5e910\") " pod="openstack/cinder-api-0" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.865403 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c3769c5a-3c30-4926-a2d5-8e1ae4e5e910-logs\") pod \"cinder-api-0\" (UID: \"c3769c5a-3c30-4926-a2d5-8e1ae4e5e910\") " pod="openstack/cinder-api-0" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.865439 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z2tlv\" (UniqueName: \"kubernetes.io/projected/c3769c5a-3c30-4926-a2d5-8e1ae4e5e910-kube-api-access-z2tlv\") pod \"cinder-api-0\" (UID: \"c3769c5a-3c30-4926-a2d5-8e1ae4e5e910\") " pod="openstack/cinder-api-0" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.865645 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: 
\"kubernetes.io/host-path/c3769c5a-3c30-4926-a2d5-8e1ae4e5e910-etc-machine-id\") pod \"cinder-api-0\" (UID: \"c3769c5a-3c30-4926-a2d5-8e1ae4e5e910\") " pod="openstack/cinder-api-0" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.865718 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3769c5a-3c30-4926-a2d5-8e1ae4e5e910-config-data\") pod \"cinder-api-0\" (UID: \"c3769c5a-3c30-4926-a2d5-8e1ae4e5e910\") " pod="openstack/cinder-api-0" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.865781 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3769c5a-3c30-4926-a2d5-8e1ae4e5e910-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"c3769c5a-3c30-4926-a2d5-8e1ae4e5e910\") " pod="openstack/cinder-api-0" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.865866 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e442b552-3d81-4d12-8d2c-8ef6c8b67ffd-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.865774 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c3769c5a-3c30-4926-a2d5-8e1ae4e5e910-etc-machine-id\") pod \"cinder-api-0\" (UID: \"c3769c5a-3c30-4926-a2d5-8e1ae4e5e910\") " pod="openstack/cinder-api-0" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.865880 4769 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e442b552-3d81-4d12-8d2c-8ef6c8b67ffd-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.869318 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3769c5a-3c30-4926-a2d5-8e1ae4e5e910-config-data\") pod \"cinder-api-0\" (UID: \"c3769c5a-3c30-4926-a2d5-8e1ae4e5e910\") " pod="openstack/cinder-api-0" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.869456 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3769c5a-3c30-4926-a2d5-8e1ae4e5e910-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"c3769c5a-3c30-4926-a2d5-8e1ae4e5e910\") " pod="openstack/cinder-api-0" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.869523 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c3769c5a-3c30-4926-a2d5-8e1ae4e5e910-config-data-custom\") pod \"cinder-api-0\" (UID: \"c3769c5a-3c30-4926-a2d5-8e1ae4e5e910\") " pod="openstack/cinder-api-0" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.870764 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c3769c5a-3c30-4926-a2d5-8e1ae4e5e910-scripts\") pod \"cinder-api-0\" (UID: \"c3769c5a-3c30-4926-a2d5-8e1ae4e5e910\") " pod="openstack/cinder-api-0" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.882389 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z2tlv\" (UniqueName: \"kubernetes.io/projected/c3769c5a-3c30-4926-a2d5-8e1ae4e5e910-kube-api-access-z2tlv\") pod \"cinder-api-0\" (UID: \"c3769c5a-3c30-4926-a2d5-8e1ae4e5e910\") " pod="openstack/cinder-api-0" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 
10:07:04.994640 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 25 10:07:04 crc kubenswrapper[4769]: I1125 10:07:04.997896 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 10:07:05 crc kubenswrapper[4769]: I1125 10:07:05.037582 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-68dcc9cf6f-v9lcm"] Nov 25 10:07:05 crc kubenswrapper[4769]: I1125 10:07:05.049501 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-68dcc9cf6f-v9lcm"] Nov 25 10:07:05 crc kubenswrapper[4769]: I1125 10:07:05.239927 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-7zb5q"] Nov 25 10:07:05 crc kubenswrapper[4769]: I1125 10:07:05.441145 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5784cf869f-7zb5q" event={"ID":"d127e733-e066-46ef-994b-62244f203f34","Type":"ContainerStarted","Data":"b7fe08b4bcd54ee98353f4aad1dd0969f7e8a277ccc15292615215294315cc13"} Nov 25 10:07:05 crc kubenswrapper[4769]: I1125 10:07:05.444109 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"c9253d8a-ffc1-4c6e-9df4-452853762e84","Type":"ContainerStarted","Data":"f5fb1f027b362a0fe0b3a741a690f3949c0fffaa135997e90e4e9a0cc6934ad9"} Nov 25 10:07:05 crc kubenswrapper[4769]: I1125 10:07:05.451629 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3373fd6b-9a0f-4268-8476-382740118f35","Type":"ContainerStarted","Data":"e9cebea8df275a0049bb2198c50d4d93b9a8b92c5ee990bf83dd7bab023911ea"} Nov 25 10:07:05 crc kubenswrapper[4769]: I1125 10:07:05.451842 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3373fd6b-9a0f-4268-8476-382740118f35" containerName="ceilometer-notification-agent" containerID="cri-o://d936a4df3ae46efea8ababca1e573f5db3891f5502bddf9c59cdda34ccccdc0e" gracePeriod=30 Nov 25 10:07:05 crc kubenswrapper[4769]: I1125 10:07:05.452144 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 10:07:05 crc kubenswrapper[4769]: I1125 10:07:05.452504 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3373fd6b-9a0f-4268-8476-382740118f35" containerName="proxy-httpd" containerID="cri-o://e9cebea8df275a0049bb2198c50d4d93b9a8b92c5ee990bf83dd7bab023911ea" gracePeriod=30 Nov 25 10:07:05 crc kubenswrapper[4769]: I1125 10:07:05.453467 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="3373fd6b-9a0f-4268-8476-382740118f35" containerName="sg-core" containerID="cri-o://b060d04afd82927b6a9152e744e144cbe21ee13ef0ff7b33a921286f098efaf1" gracePeriod=30 Nov 25 10:07:05 crc kubenswrapper[4769]: I1125 10:07:05.548299 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 25 10:07:05 crc kubenswrapper[4769]: I1125 10:07:05.594014 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-5c9685d5f7-xbblq" Nov 25 10:07:05 crc kubenswrapper[4769]: I1125 10:07:05.677171 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-75dfc45cc4-m9wmn"] Nov 25 10:07:05 crc kubenswrapper[4769]: I1125 10:07:05.677564 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-75dfc45cc4-m9wmn" 
podUID="3259d7a5-de25-4fa9-a5f8-36fc56dd733f" containerName="neutron-api" containerID="cri-o://411b0640f97ec6f18130ff185a231014fd849e7b6725898c129a5f3fbf326f82" gracePeriod=30 Nov 25 10:07:05 crc kubenswrapper[4769]: I1125 10:07:05.678341 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-75dfc45cc4-m9wmn" podUID="3259d7a5-de25-4fa9-a5f8-36fc56dd733f" containerName="neutron-httpd" containerID="cri-o://a745ec1416c3b7dff8b5bc6898f434f97b8e356d63feb9671e13d6d8296a63aa" gracePeriod=30 Nov 25 10:07:06 crc kubenswrapper[4769]: I1125 10:07:06.065244 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-88f84b978-bzvgm" podUID="a21fe715-e1e4-4f2f-84ca-e3843b3a4f06" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.200:9311/healthcheck\": read tcp 10.217.0.2:33762->10.217.0.200:9311: read: connection reset by peer" Nov 25 10:07:06 crc kubenswrapper[4769]: I1125 10:07:06.065683 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-88f84b978-bzvgm" podUID="a21fe715-e1e4-4f2f-84ca-e3843b3a4f06" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.200:9311/healthcheck\": read tcp 10.217.0.2:33754->10.217.0.200:9311: read: connection reset by peer" Nov 25 10:07:06 crc kubenswrapper[4769]: I1125 10:07:06.271026 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e442b552-3d81-4d12-8d2c-8ef6c8b67ffd" path="/var/lib/kubelet/pods/e442b552-3d81-4d12-8d2c-8ef6c8b67ffd/volumes" Nov 25 10:07:06 crc kubenswrapper[4769]: I1125 10:07:06.586504 4769 generic.go:334] "Generic (PLEG): container finished" podID="3259d7a5-de25-4fa9-a5f8-36fc56dd733f" containerID="a745ec1416c3b7dff8b5bc6898f434f97b8e356d63feb9671e13d6d8296a63aa" exitCode=0 Nov 25 10:07:06 crc kubenswrapper[4769]: I1125 10:07:06.586850 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-75dfc45cc4-m9wmn" event={"ID":"3259d7a5-de25-4fa9-a5f8-36fc56dd733f","Type":"ContainerDied","Data":"a745ec1416c3b7dff8b5bc6898f434f97b8e356d63feb9671e13d6d8296a63aa"} Nov 25 10:07:06 crc kubenswrapper[4769]: I1125 10:07:06.599595 4769 generic.go:334] "Generic (PLEG): container finished" podID="a21fe715-e1e4-4f2f-84ca-e3843b3a4f06" containerID="ffbb0e32fb8457f11d550040a0ac39717e3c5160cd004ff8752ffb9f9f0247f3" exitCode=0 Nov 25 10:07:06 crc kubenswrapper[4769]: I1125 10:07:06.599653 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-88f84b978-bzvgm" event={"ID":"a21fe715-e1e4-4f2f-84ca-e3843b3a4f06","Type":"ContainerDied","Data":"ffbb0e32fb8457f11d550040a0ac39717e3c5160cd004ff8752ffb9f9f0247f3"} Nov 25 10:07:06 crc kubenswrapper[4769]: I1125 10:07:06.653764 4769 generic.go:334] "Generic (PLEG): container finished" podID="3373fd6b-9a0f-4268-8476-382740118f35" containerID="e9cebea8df275a0049bb2198c50d4d93b9a8b92c5ee990bf83dd7bab023911ea" exitCode=0 Nov 25 10:07:06 crc kubenswrapper[4769]: I1125 10:07:06.653826 4769 generic.go:334] "Generic (PLEG): container finished" podID="3373fd6b-9a0f-4268-8476-382740118f35" containerID="b060d04afd82927b6a9152e744e144cbe21ee13ef0ff7b33a921286f098efaf1" exitCode=2 Nov 25 10:07:06 crc kubenswrapper[4769]: I1125 10:07:06.653894 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3373fd6b-9a0f-4268-8476-382740118f35","Type":"ContainerDied","Data":"e9cebea8df275a0049bb2198c50d4d93b9a8b92c5ee990bf83dd7bab023911ea"} Nov 25 10:07:06 crc 
kubenswrapper[4769]: I1125 10:07:06.653933 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3373fd6b-9a0f-4268-8476-382740118f35","Type":"ContainerDied","Data":"b060d04afd82927b6a9152e744e144cbe21ee13ef0ff7b33a921286f098efaf1"} Nov 25 10:07:06 crc kubenswrapper[4769]: I1125 10:07:06.815152 4769 generic.go:334] "Generic (PLEG): container finished" podID="d127e733-e066-46ef-994b-62244f203f34" containerID="65e0ebd4b0b43e9d547b1935266f9949553d882d7a5139aa2a766a8c369b0e05" exitCode=0 Nov 25 10:07:06 crc kubenswrapper[4769]: I1125 10:07:06.815282 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5784cf869f-7zb5q" event={"ID":"d127e733-e066-46ef-994b-62244f203f34","Type":"ContainerDied","Data":"65e0ebd4b0b43e9d547b1935266f9949553d882d7a5139aa2a766a8c369b0e05"} Nov 25 10:07:06 crc kubenswrapper[4769]: I1125 10:07:06.818649 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"c3769c5a-3c30-4926-a2d5-8e1ae4e5e910","Type":"ContainerStarted","Data":"5925d6e9ae2c24b1c38e40494456ccd56b20bbe95993024d466b54de0692c3c0"} Nov 25 10:07:07 crc kubenswrapper[4769]: I1125 10:07:07.094551 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-88f84b978-bzvgm" Nov 25 10:07:07 crc kubenswrapper[4769]: I1125 10:07:07.287203 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a21fe715-e1e4-4f2f-84ca-e3843b3a4f06-combined-ca-bundle\") pod \"a21fe715-e1e4-4f2f-84ca-e3843b3a4f06\" (UID: \"a21fe715-e1e4-4f2f-84ca-e3843b3a4f06\") " Nov 25 10:07:07 crc kubenswrapper[4769]: I1125 10:07:07.287866 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xzdlg\" (UniqueName: \"kubernetes.io/projected/a21fe715-e1e4-4f2f-84ca-e3843b3a4f06-kube-api-access-xzdlg\") pod \"a21fe715-e1e4-4f2f-84ca-e3843b3a4f06\" (UID: \"a21fe715-e1e4-4f2f-84ca-e3843b3a4f06\") " Nov 25 10:07:07 crc kubenswrapper[4769]: I1125 10:07:07.288013 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a21fe715-e1e4-4f2f-84ca-e3843b3a4f06-config-data\") pod \"a21fe715-e1e4-4f2f-84ca-e3843b3a4f06\" (UID: \"a21fe715-e1e4-4f2f-84ca-e3843b3a4f06\") " Nov 25 10:07:07 crc kubenswrapper[4769]: I1125 10:07:07.288189 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a21fe715-e1e4-4f2f-84ca-e3843b3a4f06-logs\") pod \"a21fe715-e1e4-4f2f-84ca-e3843b3a4f06\" (UID: \"a21fe715-e1e4-4f2f-84ca-e3843b3a4f06\") " Nov 25 10:07:07 crc kubenswrapper[4769]: I1125 10:07:07.288348 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a21fe715-e1e4-4f2f-84ca-e3843b3a4f06-config-data-custom\") pod \"a21fe715-e1e4-4f2f-84ca-e3843b3a4f06\" (UID: \"a21fe715-e1e4-4f2f-84ca-e3843b3a4f06\") " Nov 25 10:07:07 crc kubenswrapper[4769]: I1125 10:07:07.297218 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a21fe715-e1e4-4f2f-84ca-e3843b3a4f06-logs" (OuterVolumeSpecName: "logs") pod "a21fe715-e1e4-4f2f-84ca-e3843b3a4f06" (UID: "a21fe715-e1e4-4f2f-84ca-e3843b3a4f06"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:07:07 crc kubenswrapper[4769]: I1125 10:07:07.304989 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a21fe715-e1e4-4f2f-84ca-e3843b3a4f06-kube-api-access-xzdlg" (OuterVolumeSpecName: "kube-api-access-xzdlg") pod "a21fe715-e1e4-4f2f-84ca-e3843b3a4f06" (UID: "a21fe715-e1e4-4f2f-84ca-e3843b3a4f06"). InnerVolumeSpecName "kube-api-access-xzdlg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:07:07 crc kubenswrapper[4769]: I1125 10:07:07.305665 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a21fe715-e1e4-4f2f-84ca-e3843b3a4f06-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "a21fe715-e1e4-4f2f-84ca-e3843b3a4f06" (UID: "a21fe715-e1e4-4f2f-84ca-e3843b3a4f06"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:07 crc kubenswrapper[4769]: I1125 10:07:07.372529 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a21fe715-e1e4-4f2f-84ca-e3843b3a4f06-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a21fe715-e1e4-4f2f-84ca-e3843b3a4f06" (UID: "a21fe715-e1e4-4f2f-84ca-e3843b3a4f06"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:07 crc kubenswrapper[4769]: I1125 10:07:07.392784 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a21fe715-e1e4-4f2f-84ca-e3843b3a4f06-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:07 crc kubenswrapper[4769]: I1125 10:07:07.392819 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xzdlg\" (UniqueName: \"kubernetes.io/projected/a21fe715-e1e4-4f2f-84ca-e3843b3a4f06-kube-api-access-xzdlg\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:07 crc kubenswrapper[4769]: I1125 10:07:07.392832 4769 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a21fe715-e1e4-4f2f-84ca-e3843b3a4f06-logs\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:07 crc kubenswrapper[4769]: I1125 10:07:07.392842 4769 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a21fe715-e1e4-4f2f-84ca-e3843b3a4f06-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:07 crc kubenswrapper[4769]: I1125 10:07:07.406423 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a21fe715-e1e4-4f2f-84ca-e3843b3a4f06-config-data" (OuterVolumeSpecName: "config-data") pod "a21fe715-e1e4-4f2f-84ca-e3843b3a4f06" (UID: "a21fe715-e1e4-4f2f-84ca-e3843b3a4f06"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:07 crc kubenswrapper[4769]: I1125 10:07:07.497047 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a21fe715-e1e4-4f2f-84ca-e3843b3a4f06-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:07 crc kubenswrapper[4769]: I1125 10:07:07.678996 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 25 10:07:07 crc kubenswrapper[4769]: I1125 10:07:07.862375 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5784cf869f-7zb5q" event={"ID":"d127e733-e066-46ef-994b-62244f203f34","Type":"ContainerStarted","Data":"9faac2f35eb8dd5f4919645703894574f6367d783e430dda2b8f706433016f18"} Nov 25 10:07:07 crc kubenswrapper[4769]: I1125 10:07:07.862463 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5784cf869f-7zb5q" Nov 25 10:07:07 crc kubenswrapper[4769]: I1125 10:07:07.869300 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"c3769c5a-3c30-4926-a2d5-8e1ae4e5e910","Type":"ContainerStarted","Data":"664d049747d3043b003636c079f06f87eae8b9465d7c442de0bd8bfa0faf616b"} Nov 25 10:07:07 crc kubenswrapper[4769]: I1125 10:07:07.872528 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"c9253d8a-ffc1-4c6e-9df4-452853762e84","Type":"ContainerStarted","Data":"fe98056db3420e8b14c15bdae1cd4d1b1fe532e85143ff444c7c9dd039b259b5"} Nov 25 10:07:07 crc kubenswrapper[4769]: I1125 10:07:07.875387 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-88f84b978-bzvgm" event={"ID":"a21fe715-e1e4-4f2f-84ca-e3843b3a4f06","Type":"ContainerDied","Data":"718f7b81cfdf6d55599f44a028ec66b96917d28c1a911f370cc561c618b6f4ab"} Nov 25 10:07:07 crc kubenswrapper[4769]: I1125 10:07:07.875422 4769 scope.go:117] "RemoveContainer" containerID="ffbb0e32fb8457f11d550040a0ac39717e3c5160cd004ff8752ffb9f9f0247f3" Nov 25 10:07:07 crc kubenswrapper[4769]: I1125 10:07:07.875526 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-88f84b978-bzvgm" Nov 25 10:07:07 crc kubenswrapper[4769]: I1125 10:07:07.897161 4769 generic.go:334] "Generic (PLEG): container finished" podID="3373fd6b-9a0f-4268-8476-382740118f35" containerID="d936a4df3ae46efea8ababca1e573f5db3891f5502bddf9c59cdda34ccccdc0e" exitCode=0 Nov 25 10:07:07 crc kubenswrapper[4769]: I1125 10:07:07.897245 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3373fd6b-9a0f-4268-8476-382740118f35","Type":"ContainerDied","Data":"d936a4df3ae46efea8ababca1e573f5db3891f5502bddf9c59cdda34ccccdc0e"} Nov 25 10:07:07 crc kubenswrapper[4769]: I1125 10:07:07.923842 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5784cf869f-7zb5q" podStartSLOduration=4.923812865 podStartE2EDuration="4.923812865s" podCreationTimestamp="2025-11-25 10:07:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:07:07.882584937 +0000 UTC m=+1376.467557250" watchObservedRunningTime="2025-11-25 10:07:07.923812865 +0000 UTC m=+1376.508785178" Nov 25 10:07:07 crc kubenswrapper[4769]: I1125 10:07:07.949506 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-88f84b978-bzvgm"] Nov 25 10:07:07 crc kubenswrapper[4769]: I1125 10:07:07.953257 4769 scope.go:117] "RemoveContainer" containerID="9eae5c3664531ffe21078f8ed4e4a78f2e26c0c12c49c4291bf57ee12e079532" Nov 25 10:07:07 crc kubenswrapper[4769]: I1125 10:07:07.961994 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-88f84b978-bzvgm"] Nov 25 10:07:07 crc kubenswrapper[4769]: I1125 10:07:07.984503 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:07:08 crc kubenswrapper[4769]: I1125 10:07:08.014429 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3373fd6b-9a0f-4268-8476-382740118f35-log-httpd\") pod \"3373fd6b-9a0f-4268-8476-382740118f35\" (UID: \"3373fd6b-9a0f-4268-8476-382740118f35\") " Nov 25 10:07:08 crc kubenswrapper[4769]: I1125 10:07:08.014484 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3373fd6b-9a0f-4268-8476-382740118f35-combined-ca-bundle\") pod \"3373fd6b-9a0f-4268-8476-382740118f35\" (UID: \"3373fd6b-9a0f-4268-8476-382740118f35\") " Nov 25 10:07:08 crc kubenswrapper[4769]: I1125 10:07:08.014507 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-44v62\" (UniqueName: \"kubernetes.io/projected/3373fd6b-9a0f-4268-8476-382740118f35-kube-api-access-44v62\") pod \"3373fd6b-9a0f-4268-8476-382740118f35\" (UID: \"3373fd6b-9a0f-4268-8476-382740118f35\") " Nov 25 10:07:08 crc kubenswrapper[4769]: I1125 10:07:08.014585 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3373fd6b-9a0f-4268-8476-382740118f35-sg-core-conf-yaml\") pod \"3373fd6b-9a0f-4268-8476-382740118f35\" (UID: \"3373fd6b-9a0f-4268-8476-382740118f35\") " Nov 25 10:07:08 crc kubenswrapper[4769]: I1125 10:07:08.017768 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3373fd6b-9a0f-4268-8476-382740118f35-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "3373fd6b-9a0f-4268-8476-382740118f35" (UID: "3373fd6b-9a0f-4268-8476-382740118f35"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:07:08 crc kubenswrapper[4769]: I1125 10:07:08.029396 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3373fd6b-9a0f-4268-8476-382740118f35-kube-api-access-44v62" (OuterVolumeSpecName: "kube-api-access-44v62") pod "3373fd6b-9a0f-4268-8476-382740118f35" (UID: "3373fd6b-9a0f-4268-8476-382740118f35"). InnerVolumeSpecName "kube-api-access-44v62". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:07:08 crc kubenswrapper[4769]: I1125 10:07:08.055883 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3373fd6b-9a0f-4268-8476-382740118f35-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "3373fd6b-9a0f-4268-8476-382740118f35" (UID: "3373fd6b-9a0f-4268-8476-382740118f35"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:08 crc kubenswrapper[4769]: I1125 10:07:08.093080 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3373fd6b-9a0f-4268-8476-382740118f35-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3373fd6b-9a0f-4268-8476-382740118f35" (UID: "3373fd6b-9a0f-4268-8476-382740118f35"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:08 crc kubenswrapper[4769]: I1125 10:07:08.117379 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3373fd6b-9a0f-4268-8476-382740118f35-config-data\") pod \"3373fd6b-9a0f-4268-8476-382740118f35\" (UID: \"3373fd6b-9a0f-4268-8476-382740118f35\") " Nov 25 10:07:08 crc kubenswrapper[4769]: I1125 10:07:08.117479 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3373fd6b-9a0f-4268-8476-382740118f35-scripts\") pod \"3373fd6b-9a0f-4268-8476-382740118f35\" (UID: \"3373fd6b-9a0f-4268-8476-382740118f35\") " Nov 25 10:07:08 crc kubenswrapper[4769]: I1125 10:07:08.117536 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3373fd6b-9a0f-4268-8476-382740118f35-run-httpd\") pod \"3373fd6b-9a0f-4268-8476-382740118f35\" (UID: \"3373fd6b-9a0f-4268-8476-382740118f35\") " Nov 25 10:07:08 crc kubenswrapper[4769]: I1125 10:07:08.119331 4769 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3373fd6b-9a0f-4268-8476-382740118f35-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:08 crc kubenswrapper[4769]: I1125 10:07:08.119386 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3373fd6b-9a0f-4268-8476-382740118f35-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:08 crc kubenswrapper[4769]: I1125 10:07:08.119407 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-44v62\" (UniqueName: \"kubernetes.io/projected/3373fd6b-9a0f-4268-8476-382740118f35-kube-api-access-44v62\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:08 crc kubenswrapper[4769]: I1125 10:07:08.119422 4769 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3373fd6b-9a0f-4268-8476-382740118f35-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:08 crc kubenswrapper[4769]: I1125 10:07:08.119523 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3373fd6b-9a0f-4268-8476-382740118f35-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "3373fd6b-9a0f-4268-8476-382740118f35" (UID: "3373fd6b-9a0f-4268-8476-382740118f35"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:07:08 crc kubenswrapper[4769]: I1125 10:07:08.124116 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3373fd6b-9a0f-4268-8476-382740118f35-scripts" (OuterVolumeSpecName: "scripts") pod "3373fd6b-9a0f-4268-8476-382740118f35" (UID: "3373fd6b-9a0f-4268-8476-382740118f35"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:08 crc kubenswrapper[4769]: I1125 10:07:08.222430 4769 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3373fd6b-9a0f-4268-8476-382740118f35-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:08 crc kubenswrapper[4769]: I1125 10:07:08.222473 4769 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3373fd6b-9a0f-4268-8476-382740118f35-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:08 crc kubenswrapper[4769]: I1125 10:07:08.234120 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3373fd6b-9a0f-4268-8476-382740118f35-config-data" (OuterVolumeSpecName: "config-data") pod "3373fd6b-9a0f-4268-8476-382740118f35" (UID: "3373fd6b-9a0f-4268-8476-382740118f35"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:08 crc kubenswrapper[4769]: I1125 10:07:08.262144 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a21fe715-e1e4-4f2f-84ca-e3843b3a4f06" path="/var/lib/kubelet/pods/a21fe715-e1e4-4f2f-84ca-e3843b3a4f06/volumes" Nov 25 10:07:08 crc kubenswrapper[4769]: I1125 10:07:08.326459 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3373fd6b-9a0f-4268-8476-382740118f35-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:08 crc kubenswrapper[4769]: I1125 10:07:08.912827 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"c9253d8a-ffc1-4c6e-9df4-452853762e84","Type":"ContainerStarted","Data":"ff2cb8a098c033d5c7b8c94de593f7027140c7453006c67ba690f83e6c9565e1"} Nov 25 10:07:08 crc kubenswrapper[4769]: I1125 10:07:08.918612 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3373fd6b-9a0f-4268-8476-382740118f35","Type":"ContainerDied","Data":"5124b006ca83e899eebb48422737db90f4318001cd79ebddcbfccc09c161d2fb"} Nov 25 10:07:08 crc kubenswrapper[4769]: I1125 10:07:08.918649 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:07:08 crc kubenswrapper[4769]: I1125 10:07:08.918694 4769 scope.go:117] "RemoveContainer" containerID="e9cebea8df275a0049bb2198c50d4d93b9a8b92c5ee990bf83dd7bab023911ea" Nov 25 10:07:08 crc kubenswrapper[4769]: I1125 10:07:08.932749 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="c3769c5a-3c30-4926-a2d5-8e1ae4e5e910" containerName="cinder-api-log" containerID="cri-o://664d049747d3043b003636c079f06f87eae8b9465d7c442de0bd8bfa0faf616b" gracePeriod=30 Nov 25 10:07:08 crc kubenswrapper[4769]: I1125 10:07:08.933609 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"c3769c5a-3c30-4926-a2d5-8e1ae4e5e910","Type":"ContainerStarted","Data":"dfe17c37ac67a18ac86e1b4439e9f127cdedb6872bdb0ce15d12fb90ec30b6f3"} Nov 25 10:07:08 crc kubenswrapper[4769]: I1125 10:07:08.933671 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 25 10:07:08 crc kubenswrapper[4769]: I1125 10:07:08.933715 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="c3769c5a-3c30-4926-a2d5-8e1ae4e5e910" containerName="cinder-api" containerID="cri-o://dfe17c37ac67a18ac86e1b4439e9f127cdedb6872bdb0ce15d12fb90ec30b6f3" gracePeriod=30 Nov 25 10:07:08 crc kubenswrapper[4769]: I1125 10:07:08.953702 4769 scope.go:117] "RemoveContainer" containerID="b060d04afd82927b6a9152e744e144cbe21ee13ef0ff7b33a921286f098efaf1" Nov 25 10:07:08 crc kubenswrapper[4769]: I1125 10:07:08.955105 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=5.105245449 podStartE2EDuration="5.955074032s" podCreationTimestamp="2025-11-25 10:07:03 +0000 UTC" firstStartedPulling="2025-11-25 10:07:05.03249786 +0000 UTC m=+1373.617470163" lastFinishedPulling="2025-11-25 10:07:05.882326433 +0000 UTC m=+1374.467298746" observedRunningTime="2025-11-25 10:07:08.945770574 +0000 UTC m=+1377.530742877" watchObservedRunningTime="2025-11-25 10:07:08.955074032 +0000 UTC m=+1377.540046345" Nov 25 10:07:08 crc kubenswrapper[4769]: I1125 10:07:08.989270 4769 scope.go:117] "RemoveContainer" containerID="d936a4df3ae46efea8ababca1e573f5db3891f5502bddf9c59cdda34ccccdc0e" Nov 25 10:07:08 crc kubenswrapper[4769]: I1125 10:07:08.991435 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=4.99140531 podStartE2EDuration="4.99140531s" podCreationTimestamp="2025-11-25 10:07:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:07:08.97676603 +0000 UTC m=+1377.561738353" watchObservedRunningTime="2025-11-25 10:07:08.99140531 +0000 UTC m=+1377.576377623" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.051010 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.069317 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.082447 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:07:09 crc kubenswrapper[4769]: E1125 10:07:09.082989 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3373fd6b-9a0f-4268-8476-382740118f35" 
containerName="ceilometer-notification-agent" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.083002 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="3373fd6b-9a0f-4268-8476-382740118f35" containerName="ceilometer-notification-agent" Nov 25 10:07:09 crc kubenswrapper[4769]: E1125 10:07:09.083014 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3373fd6b-9a0f-4268-8476-382740118f35" containerName="sg-core" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.083021 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="3373fd6b-9a0f-4268-8476-382740118f35" containerName="sg-core" Nov 25 10:07:09 crc kubenswrapper[4769]: E1125 10:07:09.083036 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a21fe715-e1e4-4f2f-84ca-e3843b3a4f06" containerName="barbican-api-log" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.083043 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="a21fe715-e1e4-4f2f-84ca-e3843b3a4f06" containerName="barbican-api-log" Nov 25 10:07:09 crc kubenswrapper[4769]: E1125 10:07:09.083085 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3373fd6b-9a0f-4268-8476-382740118f35" containerName="proxy-httpd" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.083096 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="3373fd6b-9a0f-4268-8476-382740118f35" containerName="proxy-httpd" Nov 25 10:07:09 crc kubenswrapper[4769]: E1125 10:07:09.083113 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a21fe715-e1e4-4f2f-84ca-e3843b3a4f06" containerName="barbican-api" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.083120 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="a21fe715-e1e4-4f2f-84ca-e3843b3a4f06" containerName="barbican-api" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.083338 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="a21fe715-e1e4-4f2f-84ca-e3843b3a4f06" containerName="barbican-api" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.083350 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="3373fd6b-9a0f-4268-8476-382740118f35" containerName="proxy-httpd" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.083366 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="3373fd6b-9a0f-4268-8476-382740118f35" containerName="sg-core" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.083384 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="3373fd6b-9a0f-4268-8476-382740118f35" containerName="ceilometer-notification-agent" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.083398 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="a21fe715-e1e4-4f2f-84ca-e3843b3a4f06" containerName="barbican-api-log" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.085560 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.090224 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.090485 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.101042 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.171425 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f19d47b9-3be8-4525-adcf-5cf8ec05ed68-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f19d47b9-3be8-4525-adcf-5cf8ec05ed68\") " pod="openstack/ceilometer-0" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.172320 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f19d47b9-3be8-4525-adcf-5cf8ec05ed68-config-data\") pod \"ceilometer-0\" (UID: \"f19d47b9-3be8-4525-adcf-5cf8ec05ed68\") " pod="openstack/ceilometer-0" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.172400 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fjbln\" (UniqueName: \"kubernetes.io/projected/f19d47b9-3be8-4525-adcf-5cf8ec05ed68-kube-api-access-fjbln\") pod \"ceilometer-0\" (UID: \"f19d47b9-3be8-4525-adcf-5cf8ec05ed68\") " pod="openstack/ceilometer-0" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.172528 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f19d47b9-3be8-4525-adcf-5cf8ec05ed68-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f19d47b9-3be8-4525-adcf-5cf8ec05ed68\") " pod="openstack/ceilometer-0" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.172559 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f19d47b9-3be8-4525-adcf-5cf8ec05ed68-run-httpd\") pod \"ceilometer-0\" (UID: \"f19d47b9-3be8-4525-adcf-5cf8ec05ed68\") " pod="openstack/ceilometer-0" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.172715 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f19d47b9-3be8-4525-adcf-5cf8ec05ed68-log-httpd\") pod \"ceilometer-0\" (UID: \"f19d47b9-3be8-4525-adcf-5cf8ec05ed68\") " pod="openstack/ceilometer-0" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.172837 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f19d47b9-3be8-4525-adcf-5cf8ec05ed68-scripts\") pod \"ceilometer-0\" (UID: \"f19d47b9-3be8-4525-adcf-5cf8ec05ed68\") " pod="openstack/ceilometer-0" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.228571 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.274859 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f19d47b9-3be8-4525-adcf-5cf8ec05ed68-scripts\") 
pod \"ceilometer-0\" (UID: \"f19d47b9-3be8-4525-adcf-5cf8ec05ed68\") " pod="openstack/ceilometer-0" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.274949 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f19d47b9-3be8-4525-adcf-5cf8ec05ed68-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f19d47b9-3be8-4525-adcf-5cf8ec05ed68\") " pod="openstack/ceilometer-0" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.275076 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f19d47b9-3be8-4525-adcf-5cf8ec05ed68-config-data\") pod \"ceilometer-0\" (UID: \"f19d47b9-3be8-4525-adcf-5cf8ec05ed68\") " pod="openstack/ceilometer-0" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.275114 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fjbln\" (UniqueName: \"kubernetes.io/projected/f19d47b9-3be8-4525-adcf-5cf8ec05ed68-kube-api-access-fjbln\") pod \"ceilometer-0\" (UID: \"f19d47b9-3be8-4525-adcf-5cf8ec05ed68\") " pod="openstack/ceilometer-0" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.275187 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f19d47b9-3be8-4525-adcf-5cf8ec05ed68-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f19d47b9-3be8-4525-adcf-5cf8ec05ed68\") " pod="openstack/ceilometer-0" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.275210 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f19d47b9-3be8-4525-adcf-5cf8ec05ed68-run-httpd\") pod \"ceilometer-0\" (UID: \"f19d47b9-3be8-4525-adcf-5cf8ec05ed68\") " pod="openstack/ceilometer-0" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.275225 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f19d47b9-3be8-4525-adcf-5cf8ec05ed68-log-httpd\") pod \"ceilometer-0\" (UID: \"f19d47b9-3be8-4525-adcf-5cf8ec05ed68\") " pod="openstack/ceilometer-0" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.275840 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f19d47b9-3be8-4525-adcf-5cf8ec05ed68-log-httpd\") pod \"ceilometer-0\" (UID: \"f19d47b9-3be8-4525-adcf-5cf8ec05ed68\") " pod="openstack/ceilometer-0" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.276037 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f19d47b9-3be8-4525-adcf-5cf8ec05ed68-run-httpd\") pod \"ceilometer-0\" (UID: \"f19d47b9-3be8-4525-adcf-5cf8ec05ed68\") " pod="openstack/ceilometer-0" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.281278 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f19d47b9-3be8-4525-adcf-5cf8ec05ed68-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f19d47b9-3be8-4525-adcf-5cf8ec05ed68\") " pod="openstack/ceilometer-0" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.282279 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f19d47b9-3be8-4525-adcf-5cf8ec05ed68-scripts\") pod \"ceilometer-0\" (UID: 
\"f19d47b9-3be8-4525-adcf-5cf8ec05ed68\") " pod="openstack/ceilometer-0" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.289726 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f19d47b9-3be8-4525-adcf-5cf8ec05ed68-config-data\") pod \"ceilometer-0\" (UID: \"f19d47b9-3be8-4525-adcf-5cf8ec05ed68\") " pod="openstack/ceilometer-0" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.292789 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f19d47b9-3be8-4525-adcf-5cf8ec05ed68-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f19d47b9-3be8-4525-adcf-5cf8ec05ed68\") " pod="openstack/ceilometer-0" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.295331 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fjbln\" (UniqueName: \"kubernetes.io/projected/f19d47b9-3be8-4525-adcf-5cf8ec05ed68-kube-api-access-fjbln\") pod \"ceilometer-0\" (UID: \"f19d47b9-3be8-4525-adcf-5cf8ec05ed68\") " pod="openstack/ceilometer-0" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.571949 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.755459 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.895871 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c3769c5a-3c30-4926-a2d5-8e1ae4e5e910-scripts\") pod \"c3769c5a-3c30-4926-a2d5-8e1ae4e5e910\" (UID: \"c3769c5a-3c30-4926-a2d5-8e1ae4e5e910\") " Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.895918 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c3769c5a-3c30-4926-a2d5-8e1ae4e5e910-logs\") pod \"c3769c5a-3c30-4926-a2d5-8e1ae4e5e910\" (UID: \"c3769c5a-3c30-4926-a2d5-8e1ae4e5e910\") " Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.895985 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3769c5a-3c30-4926-a2d5-8e1ae4e5e910-config-data\") pod \"c3769c5a-3c30-4926-a2d5-8e1ae4e5e910\" (UID: \"c3769c5a-3c30-4926-a2d5-8e1ae4e5e910\") " Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.896045 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c3769c5a-3c30-4926-a2d5-8e1ae4e5e910-config-data-custom\") pod \"c3769c5a-3c30-4926-a2d5-8e1ae4e5e910\" (UID: \"c3769c5a-3c30-4926-a2d5-8e1ae4e5e910\") " Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.896079 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3769c5a-3c30-4926-a2d5-8e1ae4e5e910-combined-ca-bundle\") pod \"c3769c5a-3c30-4926-a2d5-8e1ae4e5e910\" (UID: \"c3769c5a-3c30-4926-a2d5-8e1ae4e5e910\") " Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.896260 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c3769c5a-3c30-4926-a2d5-8e1ae4e5e910-etc-machine-id\") pod \"c3769c5a-3c30-4926-a2d5-8e1ae4e5e910\" (UID: 
\"c3769c5a-3c30-4926-a2d5-8e1ae4e5e910\") " Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.896364 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z2tlv\" (UniqueName: \"kubernetes.io/projected/c3769c5a-3c30-4926-a2d5-8e1ae4e5e910-kube-api-access-z2tlv\") pod \"c3769c5a-3c30-4926-a2d5-8e1ae4e5e910\" (UID: \"c3769c5a-3c30-4926-a2d5-8e1ae4e5e910\") " Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.897757 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c3769c5a-3c30-4926-a2d5-8e1ae4e5e910-logs" (OuterVolumeSpecName: "logs") pod "c3769c5a-3c30-4926-a2d5-8e1ae4e5e910" (UID: "c3769c5a-3c30-4926-a2d5-8e1ae4e5e910"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.897825 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c3769c5a-3c30-4926-a2d5-8e1ae4e5e910-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "c3769c5a-3c30-4926-a2d5-8e1ae4e5e910" (UID: "c3769c5a-3c30-4926-a2d5-8e1ae4e5e910"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.903937 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c3769c5a-3c30-4926-a2d5-8e1ae4e5e910-kube-api-access-z2tlv" (OuterVolumeSpecName: "kube-api-access-z2tlv") pod "c3769c5a-3c30-4926-a2d5-8e1ae4e5e910" (UID: "c3769c5a-3c30-4926-a2d5-8e1ae4e5e910"). InnerVolumeSpecName "kube-api-access-z2tlv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.905120 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3769c5a-3c30-4926-a2d5-8e1ae4e5e910-scripts" (OuterVolumeSpecName: "scripts") pod "c3769c5a-3c30-4926-a2d5-8e1ae4e5e910" (UID: "c3769c5a-3c30-4926-a2d5-8e1ae4e5e910"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.905913 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3769c5a-3c30-4926-a2d5-8e1ae4e5e910-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "c3769c5a-3c30-4926-a2d5-8e1ae4e5e910" (UID: "c3769c5a-3c30-4926-a2d5-8e1ae4e5e910"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.952898 4769 generic.go:334] "Generic (PLEG): container finished" podID="c3769c5a-3c30-4926-a2d5-8e1ae4e5e910" containerID="dfe17c37ac67a18ac86e1b4439e9f127cdedb6872bdb0ce15d12fb90ec30b6f3" exitCode=0 Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.952931 4769 generic.go:334] "Generic (PLEG): container finished" podID="c3769c5a-3c30-4926-a2d5-8e1ae4e5e910" containerID="664d049747d3043b003636c079f06f87eae8b9465d7c442de0bd8bfa0faf616b" exitCode=143 Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.953000 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"c3769c5a-3c30-4926-a2d5-8e1ae4e5e910","Type":"ContainerDied","Data":"dfe17c37ac67a18ac86e1b4439e9f127cdedb6872bdb0ce15d12fb90ec30b6f3"} Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.953033 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"c3769c5a-3c30-4926-a2d5-8e1ae4e5e910","Type":"ContainerDied","Data":"664d049747d3043b003636c079f06f87eae8b9465d7c442de0bd8bfa0faf616b"} Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.953046 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"c3769c5a-3c30-4926-a2d5-8e1ae4e5e910","Type":"ContainerDied","Data":"5925d6e9ae2c24b1c38e40494456ccd56b20bbe95993024d466b54de0692c3c0"} Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.953064 4769 scope.go:117] "RemoveContainer" containerID="dfe17c37ac67a18ac86e1b4439e9f127cdedb6872bdb0ce15d12fb90ec30b6f3" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.953078 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.958179 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3769c5a-3c30-4926-a2d5-8e1ae4e5e910-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c3769c5a-3c30-4926-a2d5-8e1ae4e5e910" (UID: "c3769c5a-3c30-4926-a2d5-8e1ae4e5e910"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.958449 4769 generic.go:334] "Generic (PLEG): container finished" podID="3259d7a5-de25-4fa9-a5f8-36fc56dd733f" containerID="411b0640f97ec6f18130ff185a231014fd849e7b6725898c129a5f3fbf326f82" exitCode=0 Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.958501 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-75dfc45cc4-m9wmn" event={"ID":"3259d7a5-de25-4fa9-a5f8-36fc56dd733f","Type":"ContainerDied","Data":"411b0640f97ec6f18130ff185a231014fd849e7b6725898c129a5f3fbf326f82"} Nov 25 10:07:09 crc kubenswrapper[4769]: I1125 10:07:09.982719 4769 scope.go:117] "RemoveContainer" containerID="664d049747d3043b003636c079f06f87eae8b9465d7c442de0bd8bfa0faf616b" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:09.999396 4769 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c3769c5a-3c30-4926-a2d5-8e1ae4e5e910-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:09.999436 4769 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c3769c5a-3c30-4926-a2d5-8e1ae4e5e910-logs\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:09.999445 4769 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c3769c5a-3c30-4926-a2d5-8e1ae4e5e910-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:09.999457 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3769c5a-3c30-4926-a2d5-8e1ae4e5e910-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:09.999465 4769 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c3769c5a-3c30-4926-a2d5-8e1ae4e5e910-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:09.999474 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z2tlv\" (UniqueName: \"kubernetes.io/projected/c3769c5a-3c30-4926-a2d5-8e1ae4e5e910-kube-api-access-z2tlv\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.004221 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3769c5a-3c30-4926-a2d5-8e1ae4e5e910-config-data" (OuterVolumeSpecName: "config-data") pod "c3769c5a-3c30-4926-a2d5-8e1ae4e5e910" (UID: "c3769c5a-3c30-4926-a2d5-8e1ae4e5e910"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.004365 4769 scope.go:117] "RemoveContainer" containerID="dfe17c37ac67a18ac86e1b4439e9f127cdedb6872bdb0ce15d12fb90ec30b6f3" Nov 25 10:07:10 crc kubenswrapper[4769]: E1125 10:07:10.004997 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dfe17c37ac67a18ac86e1b4439e9f127cdedb6872bdb0ce15d12fb90ec30b6f3\": container with ID starting with dfe17c37ac67a18ac86e1b4439e9f127cdedb6872bdb0ce15d12fb90ec30b6f3 not found: ID does not exist" containerID="dfe17c37ac67a18ac86e1b4439e9f127cdedb6872bdb0ce15d12fb90ec30b6f3" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.005084 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dfe17c37ac67a18ac86e1b4439e9f127cdedb6872bdb0ce15d12fb90ec30b6f3"} err="failed to get container status \"dfe17c37ac67a18ac86e1b4439e9f127cdedb6872bdb0ce15d12fb90ec30b6f3\": rpc error: code = NotFound desc = could not find container \"dfe17c37ac67a18ac86e1b4439e9f127cdedb6872bdb0ce15d12fb90ec30b6f3\": container with ID starting with dfe17c37ac67a18ac86e1b4439e9f127cdedb6872bdb0ce15d12fb90ec30b6f3 not found: ID does not exist" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.005180 4769 scope.go:117] "RemoveContainer" containerID="664d049747d3043b003636c079f06f87eae8b9465d7c442de0bd8bfa0faf616b" Nov 25 10:07:10 crc kubenswrapper[4769]: E1125 10:07:10.005931 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"664d049747d3043b003636c079f06f87eae8b9465d7c442de0bd8bfa0faf616b\": container with ID starting with 664d049747d3043b003636c079f06f87eae8b9465d7c442de0bd8bfa0faf616b not found: ID does not exist" containerID="664d049747d3043b003636c079f06f87eae8b9465d7c442de0bd8bfa0faf616b" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.006012 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"664d049747d3043b003636c079f06f87eae8b9465d7c442de0bd8bfa0faf616b"} err="failed to get container status \"664d049747d3043b003636c079f06f87eae8b9465d7c442de0bd8bfa0faf616b\": rpc error: code = NotFound desc = could not find container \"664d049747d3043b003636c079f06f87eae8b9465d7c442de0bd8bfa0faf616b\": container with ID starting with 664d049747d3043b003636c079f06f87eae8b9465d7c442de0bd8bfa0faf616b not found: ID does not exist" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.006051 4769 scope.go:117] "RemoveContainer" containerID="dfe17c37ac67a18ac86e1b4439e9f127cdedb6872bdb0ce15d12fb90ec30b6f3" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.006421 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dfe17c37ac67a18ac86e1b4439e9f127cdedb6872bdb0ce15d12fb90ec30b6f3"} err="failed to get container status \"dfe17c37ac67a18ac86e1b4439e9f127cdedb6872bdb0ce15d12fb90ec30b6f3\": rpc error: code = NotFound desc = could not find container \"dfe17c37ac67a18ac86e1b4439e9f127cdedb6872bdb0ce15d12fb90ec30b6f3\": container with ID starting with dfe17c37ac67a18ac86e1b4439e9f127cdedb6872bdb0ce15d12fb90ec30b6f3 not found: ID does not exist" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.006505 4769 scope.go:117] "RemoveContainer" containerID="664d049747d3043b003636c079f06f87eae8b9465d7c442de0bd8bfa0faf616b" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.006935 4769 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"664d049747d3043b003636c079f06f87eae8b9465d7c442de0bd8bfa0faf616b"} err="failed to get container status \"664d049747d3043b003636c079f06f87eae8b9465d7c442de0bd8bfa0faf616b\": rpc error: code = NotFound desc = could not find container \"664d049747d3043b003636c079f06f87eae8b9465d7c442de0bd8bfa0faf616b\": container with ID starting with 664d049747d3043b003636c079f06f87eae8b9465d7c442de0bd8bfa0faf616b not found: ID does not exist" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.102262 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3769c5a-3c30-4926-a2d5-8e1ae4e5e910-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.155089 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-75dfc45cc4-m9wmn" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.251223 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3373fd6b-9a0f-4268-8476-382740118f35" path="/var/lib/kubelet/pods/3373fd6b-9a0f-4268-8476-382740118f35/volumes" Nov 25 10:07:10 crc kubenswrapper[4769]: W1125 10:07:10.257171 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf19d47b9_3be8_4525_adcf_5cf8ec05ed68.slice/crio-bbff13873604a5e7978cb31b3b7fb5e81ee11f4cf05e6fec5891656f7b7a60ca WatchSource:0}: Error finding container bbff13873604a5e7978cb31b3b7fb5e81ee11f4cf05e6fec5891656f7b7a60ca: Status 404 returned error can't find the container with id bbff13873604a5e7978cb31b3b7fb5e81ee11f4cf05e6fec5891656f7b7a60ca Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.257579 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.297329 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.307351 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3259d7a5-de25-4fa9-a5f8-36fc56dd733f-combined-ca-bundle\") pod \"3259d7a5-de25-4fa9-a5f8-36fc56dd733f\" (UID: \"3259d7a5-de25-4fa9-a5f8-36fc56dd733f\") " Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.307427 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/3259d7a5-de25-4fa9-a5f8-36fc56dd733f-config\") pod \"3259d7a5-de25-4fa9-a5f8-36fc56dd733f\" (UID: \"3259d7a5-de25-4fa9-a5f8-36fc56dd733f\") " Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.307489 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/3259d7a5-de25-4fa9-a5f8-36fc56dd733f-httpd-config\") pod \"3259d7a5-de25-4fa9-a5f8-36fc56dd733f\" (UID: \"3259d7a5-de25-4fa9-a5f8-36fc56dd733f\") " Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.307732 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/3259d7a5-de25-4fa9-a5f8-36fc56dd733f-ovndb-tls-certs\") pod \"3259d7a5-de25-4fa9-a5f8-36fc56dd733f\" (UID: \"3259d7a5-de25-4fa9-a5f8-36fc56dd733f\") " Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.307852 4769 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-k2jm7\" (UniqueName: \"kubernetes.io/projected/3259d7a5-de25-4fa9-a5f8-36fc56dd733f-kube-api-access-k2jm7\") pod \"3259d7a5-de25-4fa9-a5f8-36fc56dd733f\" (UID: \"3259d7a5-de25-4fa9-a5f8-36fc56dd733f\") " Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.310562 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.316121 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3259d7a5-de25-4fa9-a5f8-36fc56dd733f-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "3259d7a5-de25-4fa9-a5f8-36fc56dd733f" (UID: "3259d7a5-de25-4fa9-a5f8-36fc56dd733f"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.330001 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 25 10:07:10 crc kubenswrapper[4769]: E1125 10:07:10.331572 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3769c5a-3c30-4926-a2d5-8e1ae4e5e910" containerName="cinder-api-log" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.333027 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3769c5a-3c30-4926-a2d5-8e1ae4e5e910" containerName="cinder-api-log" Nov 25 10:07:10 crc kubenswrapper[4769]: E1125 10:07:10.333156 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3259d7a5-de25-4fa9-a5f8-36fc56dd733f" containerName="neutron-httpd" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.333214 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="3259d7a5-de25-4fa9-a5f8-36fc56dd733f" containerName="neutron-httpd" Nov 25 10:07:10 crc kubenswrapper[4769]: E1125 10:07:10.333288 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3769c5a-3c30-4926-a2d5-8e1ae4e5e910" containerName="cinder-api" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.333341 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3769c5a-3c30-4926-a2d5-8e1ae4e5e910" containerName="cinder-api" Nov 25 10:07:10 crc kubenswrapper[4769]: E1125 10:07:10.333437 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3259d7a5-de25-4fa9-a5f8-36fc56dd733f" containerName="neutron-api" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.333537 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="3259d7a5-de25-4fa9-a5f8-36fc56dd733f" containerName="neutron-api" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.333810 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="3259d7a5-de25-4fa9-a5f8-36fc56dd733f" containerName="neutron-httpd" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.334020 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="3259d7a5-de25-4fa9-a5f8-36fc56dd733f" containerName="neutron-api" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.334109 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="c3769c5a-3c30-4926-a2d5-8e1ae4e5e910" containerName="cinder-api-log" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.334215 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="c3769c5a-3c30-4926-a2d5-8e1ae4e5e910" containerName="cinder-api" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.332674 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/projected/3259d7a5-de25-4fa9-a5f8-36fc56dd733f-kube-api-access-k2jm7" (OuterVolumeSpecName: "kube-api-access-k2jm7") pod "3259d7a5-de25-4fa9-a5f8-36fc56dd733f" (UID: "3259d7a5-de25-4fa9-a5f8-36fc56dd733f"). InnerVolumeSpecName "kube-api-access-k2jm7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.343201 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.355875 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.356281 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.357082 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.372544 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.411312 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k2jm7\" (UniqueName: \"kubernetes.io/projected/3259d7a5-de25-4fa9-a5f8-36fc56dd733f-kube-api-access-k2jm7\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.411350 4769 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/3259d7a5-de25-4fa9-a5f8-36fc56dd733f-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.430345 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3259d7a5-de25-4fa9-a5f8-36fc56dd733f-config" (OuterVolumeSpecName: "config") pod "3259d7a5-de25-4fa9-a5f8-36fc56dd733f" (UID: "3259d7a5-de25-4fa9-a5f8-36fc56dd733f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.451763 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3259d7a5-de25-4fa9-a5f8-36fc56dd733f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3259d7a5-de25-4fa9-a5f8-36fc56dd733f" (UID: "3259d7a5-de25-4fa9-a5f8-36fc56dd733f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.478924 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3259d7a5-de25-4fa9-a5f8-36fc56dd733f-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "3259d7a5-de25-4fa9-a5f8-36fc56dd733f" (UID: "3259d7a5-de25-4fa9-a5f8-36fc56dd733f"). InnerVolumeSpecName "ovndb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.513062 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/87066e63-7bf0-47dd-a601-88c880a1b5e4-etc-machine-id\") pod \"cinder-api-0\" (UID: \"87066e63-7bf0-47dd-a601-88c880a1b5e4\") " pod="openstack/cinder-api-0" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.513129 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87066e63-7bf0-47dd-a601-88c880a1b5e4-config-data\") pod \"cinder-api-0\" (UID: \"87066e63-7bf0-47dd-a601-88c880a1b5e4\") " pod="openstack/cinder-api-0" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.513153 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87066e63-7bf0-47dd-a601-88c880a1b5e4-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"87066e63-7bf0-47dd-a601-88c880a1b5e4\") " pod="openstack/cinder-api-0" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.513175 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/87066e63-7bf0-47dd-a601-88c880a1b5e4-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"87066e63-7bf0-47dd-a601-88c880a1b5e4\") " pod="openstack/cinder-api-0" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.513203 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/87066e63-7bf0-47dd-a601-88c880a1b5e4-config-data-custom\") pod \"cinder-api-0\" (UID: \"87066e63-7bf0-47dd-a601-88c880a1b5e4\") " pod="openstack/cinder-api-0" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.513253 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/87066e63-7bf0-47dd-a601-88c880a1b5e4-public-tls-certs\") pod \"cinder-api-0\" (UID: \"87066e63-7bf0-47dd-a601-88c880a1b5e4\") " pod="openstack/cinder-api-0" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.513479 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cljzr\" (UniqueName: \"kubernetes.io/projected/87066e63-7bf0-47dd-a601-88c880a1b5e4-kube-api-access-cljzr\") pod \"cinder-api-0\" (UID: \"87066e63-7bf0-47dd-a601-88c880a1b5e4\") " pod="openstack/cinder-api-0" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.514163 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/87066e63-7bf0-47dd-a601-88c880a1b5e4-scripts\") pod \"cinder-api-0\" (UID: \"87066e63-7bf0-47dd-a601-88c880a1b5e4\") " pod="openstack/cinder-api-0" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.514271 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/87066e63-7bf0-47dd-a601-88c880a1b5e4-logs\") pod \"cinder-api-0\" (UID: \"87066e63-7bf0-47dd-a601-88c880a1b5e4\") " pod="openstack/cinder-api-0" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.514576 4769 reconciler_common.go:293] "Volume detached for volume 
\"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/3259d7a5-de25-4fa9-a5f8-36fc56dd733f-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.514615 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3259d7a5-de25-4fa9-a5f8-36fc56dd733f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.514630 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/3259d7a5-de25-4fa9-a5f8-36fc56dd733f-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.616295 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/87066e63-7bf0-47dd-a601-88c880a1b5e4-etc-machine-id\") pod \"cinder-api-0\" (UID: \"87066e63-7bf0-47dd-a601-88c880a1b5e4\") " pod="openstack/cinder-api-0" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.616367 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87066e63-7bf0-47dd-a601-88c880a1b5e4-config-data\") pod \"cinder-api-0\" (UID: \"87066e63-7bf0-47dd-a601-88c880a1b5e4\") " pod="openstack/cinder-api-0" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.616404 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87066e63-7bf0-47dd-a601-88c880a1b5e4-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"87066e63-7bf0-47dd-a601-88c880a1b5e4\") " pod="openstack/cinder-api-0" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.616431 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/87066e63-7bf0-47dd-a601-88c880a1b5e4-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"87066e63-7bf0-47dd-a601-88c880a1b5e4\") " pod="openstack/cinder-api-0" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.616428 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/87066e63-7bf0-47dd-a601-88c880a1b5e4-etc-machine-id\") pod \"cinder-api-0\" (UID: \"87066e63-7bf0-47dd-a601-88c880a1b5e4\") " pod="openstack/cinder-api-0" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.616656 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/87066e63-7bf0-47dd-a601-88c880a1b5e4-config-data-custom\") pod \"cinder-api-0\" (UID: \"87066e63-7bf0-47dd-a601-88c880a1b5e4\") " pod="openstack/cinder-api-0" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.616716 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/87066e63-7bf0-47dd-a601-88c880a1b5e4-public-tls-certs\") pod \"cinder-api-0\" (UID: \"87066e63-7bf0-47dd-a601-88c880a1b5e4\") " pod="openstack/cinder-api-0" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.616750 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cljzr\" (UniqueName: \"kubernetes.io/projected/87066e63-7bf0-47dd-a601-88c880a1b5e4-kube-api-access-cljzr\") pod \"cinder-api-0\" (UID: \"87066e63-7bf0-47dd-a601-88c880a1b5e4\") " pod="openstack/cinder-api-0" Nov 25 10:07:10 
crc kubenswrapper[4769]: I1125 10:07:10.616974 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/87066e63-7bf0-47dd-a601-88c880a1b5e4-scripts\") pod \"cinder-api-0\" (UID: \"87066e63-7bf0-47dd-a601-88c880a1b5e4\") " pod="openstack/cinder-api-0" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.617361 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/87066e63-7bf0-47dd-a601-88c880a1b5e4-logs\") pod \"cinder-api-0\" (UID: \"87066e63-7bf0-47dd-a601-88c880a1b5e4\") " pod="openstack/cinder-api-0" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.617638 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/87066e63-7bf0-47dd-a601-88c880a1b5e4-logs\") pod \"cinder-api-0\" (UID: \"87066e63-7bf0-47dd-a601-88c880a1b5e4\") " pod="openstack/cinder-api-0" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.620347 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/87066e63-7bf0-47dd-a601-88c880a1b5e4-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"87066e63-7bf0-47dd-a601-88c880a1b5e4\") " pod="openstack/cinder-api-0" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.621259 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/87066e63-7bf0-47dd-a601-88c880a1b5e4-public-tls-certs\") pod \"cinder-api-0\" (UID: \"87066e63-7bf0-47dd-a601-88c880a1b5e4\") " pod="openstack/cinder-api-0" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.622699 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/87066e63-7bf0-47dd-a601-88c880a1b5e4-scripts\") pod \"cinder-api-0\" (UID: \"87066e63-7bf0-47dd-a601-88c880a1b5e4\") " pod="openstack/cinder-api-0" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.622815 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87066e63-7bf0-47dd-a601-88c880a1b5e4-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"87066e63-7bf0-47dd-a601-88c880a1b5e4\") " pod="openstack/cinder-api-0" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.623614 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/87066e63-7bf0-47dd-a601-88c880a1b5e4-config-data-custom\") pod \"cinder-api-0\" (UID: \"87066e63-7bf0-47dd-a601-88c880a1b5e4\") " pod="openstack/cinder-api-0" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.625353 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/87066e63-7bf0-47dd-a601-88c880a1b5e4-config-data\") pod \"cinder-api-0\" (UID: \"87066e63-7bf0-47dd-a601-88c880a1b5e4\") " pod="openstack/cinder-api-0" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.639568 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cljzr\" (UniqueName: \"kubernetes.io/projected/87066e63-7bf0-47dd-a601-88c880a1b5e4-kube-api-access-cljzr\") pod \"cinder-api-0\" (UID: \"87066e63-7bf0-47dd-a601-88c880a1b5e4\") " pod="openstack/cinder-api-0" Nov 25 10:07:10 crc kubenswrapper[4769]: I1125 10:07:10.847799 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 25 10:07:11 crc kubenswrapper[4769]: I1125 10:07:11.004635 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f19d47b9-3be8-4525-adcf-5cf8ec05ed68","Type":"ContainerStarted","Data":"bbff13873604a5e7978cb31b3b7fb5e81ee11f4cf05e6fec5891656f7b7a60ca"} Nov 25 10:07:11 crc kubenswrapper[4769]: I1125 10:07:11.017566 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-75dfc45cc4-m9wmn" Nov 25 10:07:11 crc kubenswrapper[4769]: I1125 10:07:11.017635 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-75dfc45cc4-m9wmn" event={"ID":"3259d7a5-de25-4fa9-a5f8-36fc56dd733f","Type":"ContainerDied","Data":"6a4d61afc96f7fb377aa2d042b326a4e89f84b59a0cbb597e08d0f7308b1d289"} Nov 25 10:07:11 crc kubenswrapper[4769]: I1125 10:07:11.017688 4769 scope.go:117] "RemoveContainer" containerID="a745ec1416c3b7dff8b5bc6898f434f97b8e356d63feb9671e13d6d8296a63aa" Nov 25 10:07:11 crc kubenswrapper[4769]: I1125 10:07:11.065221 4769 scope.go:117] "RemoveContainer" containerID="411b0640f97ec6f18130ff185a231014fd849e7b6725898c129a5f3fbf326f82" Nov 25 10:07:11 crc kubenswrapper[4769]: I1125 10:07:11.071770 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-75dfc45cc4-m9wmn"] Nov 25 10:07:11 crc kubenswrapper[4769]: I1125 10:07:11.096442 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-75dfc45cc4-m9wmn"] Nov 25 10:07:11 crc kubenswrapper[4769]: I1125 10:07:11.435574 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 25 10:07:11 crc kubenswrapper[4769]: W1125 10:07:11.436538 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod87066e63_7bf0_47dd_a601_88c880a1b5e4.slice/crio-f7fef5bcdd1f656b2f3d6e4fcf6b14b4660029c7207c4c070e0ce8be400f7877 WatchSource:0}: Error finding container f7fef5bcdd1f656b2f3d6e4fcf6b14b4660029c7207c4c070e0ce8be400f7877: Status 404 returned error can't find the container with id f7fef5bcdd1f656b2f3d6e4fcf6b14b4660029c7207c4c070e0ce8be400f7877 Nov 25 10:07:12 crc kubenswrapper[4769]: I1125 10:07:12.035622 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"87066e63-7bf0-47dd-a601-88c880a1b5e4","Type":"ContainerStarted","Data":"f7fef5bcdd1f656b2f3d6e4fcf6b14b4660029c7207c4c070e0ce8be400f7877"} Nov 25 10:07:12 crc kubenswrapper[4769]: I1125 10:07:12.040492 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f19d47b9-3be8-4525-adcf-5cf8ec05ed68","Type":"ContainerStarted","Data":"fd2e4284ab582054817765d1ec02bf285c80eee5ee5af43cc2bf06916fa6b7a5"} Nov 25 10:07:12 crc kubenswrapper[4769]: I1125 10:07:12.040528 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f19d47b9-3be8-4525-adcf-5cf8ec05ed68","Type":"ContainerStarted","Data":"03b0818632efff47884190f830bae5e6400f5396ba923330493b774a2d0a72e4"} Nov 25 10:07:12 crc kubenswrapper[4769]: I1125 10:07:12.272276 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3259d7a5-de25-4fa9-a5f8-36fc56dd733f" path="/var/lib/kubelet/pods/3259d7a5-de25-4fa9-a5f8-36fc56dd733f/volumes" Nov 25 10:07:12 crc kubenswrapper[4769]: I1125 10:07:12.273281 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c3769c5a-3c30-4926-a2d5-8e1ae4e5e910" 
path="/var/lib/kubelet/pods/c3769c5a-3c30-4926-a2d5-8e1ae4e5e910/volumes" Nov 25 10:07:13 crc kubenswrapper[4769]: I1125 10:07:13.057569 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"87066e63-7bf0-47dd-a601-88c880a1b5e4","Type":"ContainerStarted","Data":"d2d4ca39d98673eb8de3f5c7eb7d893ea4bf2937fc654df510f64040caf70e4b"} Nov 25 10:07:13 crc kubenswrapper[4769]: I1125 10:07:13.058310 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 25 10:07:13 crc kubenswrapper[4769]: I1125 10:07:13.058324 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"87066e63-7bf0-47dd-a601-88c880a1b5e4","Type":"ContainerStarted","Data":"d6a7be5d7daeec7175cf9a1a785fff1dc81abe0c808d27c389644f82ba596fee"} Nov 25 10:07:13 crc kubenswrapper[4769]: I1125 10:07:13.061032 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f19d47b9-3be8-4525-adcf-5cf8ec05ed68","Type":"ContainerStarted","Data":"84c73fc52ebe48575f4c128ea8e61b39efa3c4d7a2a578209f5a53dfd82a7aab"} Nov 25 10:07:13 crc kubenswrapper[4769]: I1125 10:07:13.085820 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.085795761 podStartE2EDuration="3.085795761s" podCreationTimestamp="2025-11-25 10:07:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:07:13.08163702 +0000 UTC m=+1381.666609333" watchObservedRunningTime="2025-11-25 10:07:13.085795761 +0000 UTC m=+1381.670768074" Nov 25 10:07:14 crc kubenswrapper[4769]: I1125 10:07:14.492547 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 25 10:07:14 crc kubenswrapper[4769]: I1125 10:07:14.547224 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5784cf869f-7zb5q" Nov 25 10:07:14 crc kubenswrapper[4769]: I1125 10:07:14.567807 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 10:07:14 crc kubenswrapper[4769]: I1125 10:07:14.625059 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-75c8ddd69c-8tf2v"] Nov 25 10:07:14 crc kubenswrapper[4769]: I1125 10:07:14.625355 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-75c8ddd69c-8tf2v" podUID="42db85a3-0c1c-416f-b1a0-857c750bb2c8" containerName="dnsmasq-dns" containerID="cri-o://b9e0ae79fee228165fdacb7b309756b0ed9065ffdcb684887bbe6dd5d367eb50" gracePeriod=10 Nov 25 10:07:15 crc kubenswrapper[4769]: I1125 10:07:15.092002 4769 generic.go:334] "Generic (PLEG): container finished" podID="42db85a3-0c1c-416f-b1a0-857c750bb2c8" containerID="b9e0ae79fee228165fdacb7b309756b0ed9065ffdcb684887bbe6dd5d367eb50" exitCode=0 Nov 25 10:07:15 crc kubenswrapper[4769]: I1125 10:07:15.092081 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75c8ddd69c-8tf2v" event={"ID":"42db85a3-0c1c-416f-b1a0-857c750bb2c8","Type":"ContainerDied","Data":"b9e0ae79fee228165fdacb7b309756b0ed9065ffdcb684887bbe6dd5d367eb50"} Nov 25 10:07:15 crc kubenswrapper[4769]: I1125 10:07:15.095574 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"f19d47b9-3be8-4525-adcf-5cf8ec05ed68","Type":"ContainerStarted","Data":"be21b5b052fce5850e61dce4e3173a5984c07e09a17692588190bcc6276bf793"} Nov 25 10:07:15 crc kubenswrapper[4769]: I1125 10:07:15.095826 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="c9253d8a-ffc1-4c6e-9df4-452853762e84" containerName="cinder-scheduler" containerID="cri-o://fe98056db3420e8b14c15bdae1cd4d1b1fe532e85143ff444c7c9dd039b259b5" gracePeriod=30 Nov 25 10:07:15 crc kubenswrapper[4769]: I1125 10:07:15.095869 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="c9253d8a-ffc1-4c6e-9df4-452853762e84" containerName="probe" containerID="cri-o://ff2cb8a098c033d5c7b8c94de593f7027140c7453006c67ba690f83e6c9565e1" gracePeriod=30 Nov 25 10:07:15 crc kubenswrapper[4769]: I1125 10:07:15.137074 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.236511728 podStartE2EDuration="6.137046884s" podCreationTimestamp="2025-11-25 10:07:09 +0000 UTC" firstStartedPulling="2025-11-25 10:07:10.259605899 +0000 UTC m=+1378.844578212" lastFinishedPulling="2025-11-25 10:07:14.160141015 +0000 UTC m=+1382.745113368" observedRunningTime="2025-11-25 10:07:15.129332408 +0000 UTC m=+1383.714304731" watchObservedRunningTime="2025-11-25 10:07:15.137046884 +0000 UTC m=+1383.722019197" Nov 25 10:07:15 crc kubenswrapper[4769]: I1125 10:07:15.284958 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75c8ddd69c-8tf2v" Nov 25 10:07:15 crc kubenswrapper[4769]: I1125 10:07:15.356488 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/42db85a3-0c1c-416f-b1a0-857c750bb2c8-dns-svc\") pod \"42db85a3-0c1c-416f-b1a0-857c750bb2c8\" (UID: \"42db85a3-0c1c-416f-b1a0-857c750bb2c8\") " Nov 25 10:07:15 crc kubenswrapper[4769]: I1125 10:07:15.356699 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/42db85a3-0c1c-416f-b1a0-857c750bb2c8-ovsdbserver-nb\") pod \"42db85a3-0c1c-416f-b1a0-857c750bb2c8\" (UID: \"42db85a3-0c1c-416f-b1a0-857c750bb2c8\") " Nov 25 10:07:15 crc kubenswrapper[4769]: I1125 10:07:15.356905 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/42db85a3-0c1c-416f-b1a0-857c750bb2c8-ovsdbserver-sb\") pod \"42db85a3-0c1c-416f-b1a0-857c750bb2c8\" (UID: \"42db85a3-0c1c-416f-b1a0-857c750bb2c8\") " Nov 25 10:07:15 crc kubenswrapper[4769]: I1125 10:07:15.357066 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/42db85a3-0c1c-416f-b1a0-857c750bb2c8-dns-swift-storage-0\") pod \"42db85a3-0c1c-416f-b1a0-857c750bb2c8\" (UID: \"42db85a3-0c1c-416f-b1a0-857c750bb2c8\") " Nov 25 10:07:15 crc kubenswrapper[4769]: I1125 10:07:15.357168 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/42db85a3-0c1c-416f-b1a0-857c750bb2c8-config\") pod \"42db85a3-0c1c-416f-b1a0-857c750bb2c8\" (UID: \"42db85a3-0c1c-416f-b1a0-857c750bb2c8\") " Nov 25 10:07:15 crc kubenswrapper[4769]: I1125 10:07:15.357393 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"kube-api-access-7kv2q\" (UniqueName: \"kubernetes.io/projected/42db85a3-0c1c-416f-b1a0-857c750bb2c8-kube-api-access-7kv2q\") pod \"42db85a3-0c1c-416f-b1a0-857c750bb2c8\" (UID: \"42db85a3-0c1c-416f-b1a0-857c750bb2c8\") " Nov 25 10:07:15 crc kubenswrapper[4769]: I1125 10:07:15.377432 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/42db85a3-0c1c-416f-b1a0-857c750bb2c8-kube-api-access-7kv2q" (OuterVolumeSpecName: "kube-api-access-7kv2q") pod "42db85a3-0c1c-416f-b1a0-857c750bb2c8" (UID: "42db85a3-0c1c-416f-b1a0-857c750bb2c8"). InnerVolumeSpecName "kube-api-access-7kv2q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:07:15 crc kubenswrapper[4769]: I1125 10:07:15.450703 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/42db85a3-0c1c-416f-b1a0-857c750bb2c8-config" (OuterVolumeSpecName: "config") pod "42db85a3-0c1c-416f-b1a0-857c750bb2c8" (UID: "42db85a3-0c1c-416f-b1a0-857c750bb2c8"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:07:15 crc kubenswrapper[4769]: I1125 10:07:15.469580 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/42db85a3-0c1c-416f-b1a0-857c750bb2c8-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "42db85a3-0c1c-416f-b1a0-857c750bb2c8" (UID: "42db85a3-0c1c-416f-b1a0-857c750bb2c8"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:07:15 crc kubenswrapper[4769]: I1125 10:07:15.472079 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7kv2q\" (UniqueName: \"kubernetes.io/projected/42db85a3-0c1c-416f-b1a0-857c750bb2c8-kube-api-access-7kv2q\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:15 crc kubenswrapper[4769]: I1125 10:07:15.472157 4769 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/42db85a3-0c1c-416f-b1a0-857c750bb2c8-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:15 crc kubenswrapper[4769]: I1125 10:07:15.472170 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/42db85a3-0c1c-416f-b1a0-857c750bb2c8-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:15 crc kubenswrapper[4769]: I1125 10:07:15.474216 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/42db85a3-0c1c-416f-b1a0-857c750bb2c8-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "42db85a3-0c1c-416f-b1a0-857c750bb2c8" (UID: "42db85a3-0c1c-416f-b1a0-857c750bb2c8"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:07:15 crc kubenswrapper[4769]: I1125 10:07:15.476649 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/42db85a3-0c1c-416f-b1a0-857c750bb2c8-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "42db85a3-0c1c-416f-b1a0-857c750bb2c8" (UID: "42db85a3-0c1c-416f-b1a0-857c750bb2c8"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:07:15 crc kubenswrapper[4769]: I1125 10:07:15.483538 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/42db85a3-0c1c-416f-b1a0-857c750bb2c8-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "42db85a3-0c1c-416f-b1a0-857c750bb2c8" (UID: "42db85a3-0c1c-416f-b1a0-857c750bb2c8"). 
InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:07:15 crc kubenswrapper[4769]: I1125 10:07:15.574260 4769 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/42db85a3-0c1c-416f-b1a0-857c750bb2c8-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:15 crc kubenswrapper[4769]: I1125 10:07:15.574313 4769 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/42db85a3-0c1c-416f-b1a0-857c750bb2c8-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:15 crc kubenswrapper[4769]: I1125 10:07:15.574325 4769 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/42db85a3-0c1c-416f-b1a0-857c750bb2c8-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:16 crc kubenswrapper[4769]: I1125 10:07:16.122748 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75c8ddd69c-8tf2v" Nov 25 10:07:16 crc kubenswrapper[4769]: I1125 10:07:16.122604 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75c8ddd69c-8tf2v" event={"ID":"42db85a3-0c1c-416f-b1a0-857c750bb2c8","Type":"ContainerDied","Data":"5e189cc41795bd16e3952feef0e19f8f033f2018110459328704e305b70708ce"} Nov 25 10:07:16 crc kubenswrapper[4769]: I1125 10:07:16.123178 4769 scope.go:117] "RemoveContainer" containerID="b9e0ae79fee228165fdacb7b309756b0ed9065ffdcb684887bbe6dd5d367eb50" Nov 25 10:07:16 crc kubenswrapper[4769]: I1125 10:07:16.129787 4769 generic.go:334] "Generic (PLEG): container finished" podID="c9253d8a-ffc1-4c6e-9df4-452853762e84" containerID="ff2cb8a098c033d5c7b8c94de593f7027140c7453006c67ba690f83e6c9565e1" exitCode=0 Nov 25 10:07:16 crc kubenswrapper[4769]: I1125 10:07:16.132443 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"c9253d8a-ffc1-4c6e-9df4-452853762e84","Type":"ContainerDied","Data":"ff2cb8a098c033d5c7b8c94de593f7027140c7453006c67ba690f83e6c9565e1"} Nov 25 10:07:16 crc kubenswrapper[4769]: I1125 10:07:16.132601 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 10:07:16 crc kubenswrapper[4769]: I1125 10:07:16.181268 4769 scope.go:117] "RemoveContainer" containerID="01c0fe592c10b6df89d8a513475f6241d356ab21fb56a81fc3cdff8c30f217aa" Nov 25 10:07:16 crc kubenswrapper[4769]: I1125 10:07:16.185814 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-75c8ddd69c-8tf2v"] Nov 25 10:07:16 crc kubenswrapper[4769]: I1125 10:07:16.201280 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-75c8ddd69c-8tf2v"] Nov 25 10:07:16 crc kubenswrapper[4769]: I1125 10:07:16.254103 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="42db85a3-0c1c-416f-b1a0-857c750bb2c8" path="/var/lib/kubelet/pods/42db85a3-0c1c-416f-b1a0-857c750bb2c8/volumes" Nov 25 10:07:16 crc kubenswrapper[4769]: I1125 10:07:16.737259 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 10:07:16 crc kubenswrapper[4769]: I1125 10:07:16.801559 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9253d8a-ffc1-4c6e-9df4-452853762e84-combined-ca-bundle\") pod \"c9253d8a-ffc1-4c6e-9df4-452853762e84\" (UID: \"c9253d8a-ffc1-4c6e-9df4-452853762e84\") " Nov 25 10:07:16 crc kubenswrapper[4769]: I1125 10:07:16.801759 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c9253d8a-ffc1-4c6e-9df4-452853762e84-etc-machine-id\") pod \"c9253d8a-ffc1-4c6e-9df4-452853762e84\" (UID: \"c9253d8a-ffc1-4c6e-9df4-452853762e84\") " Nov 25 10:07:16 crc kubenswrapper[4769]: I1125 10:07:16.801859 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-26v5b\" (UniqueName: \"kubernetes.io/projected/c9253d8a-ffc1-4c6e-9df4-452853762e84-kube-api-access-26v5b\") pod \"c9253d8a-ffc1-4c6e-9df4-452853762e84\" (UID: \"c9253d8a-ffc1-4c6e-9df4-452853762e84\") " Nov 25 10:07:16 crc kubenswrapper[4769]: I1125 10:07:16.801893 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c9253d8a-ffc1-4c6e-9df4-452853762e84-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "c9253d8a-ffc1-4c6e-9df4-452853762e84" (UID: "c9253d8a-ffc1-4c6e-9df4-452853762e84"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 10:07:16 crc kubenswrapper[4769]: I1125 10:07:16.801942 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c9253d8a-ffc1-4c6e-9df4-452853762e84-config-data-custom\") pod \"c9253d8a-ffc1-4c6e-9df4-452853762e84\" (UID: \"c9253d8a-ffc1-4c6e-9df4-452853762e84\") " Nov 25 10:07:16 crc kubenswrapper[4769]: I1125 10:07:16.802038 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9253d8a-ffc1-4c6e-9df4-452853762e84-config-data\") pod \"c9253d8a-ffc1-4c6e-9df4-452853762e84\" (UID: \"c9253d8a-ffc1-4c6e-9df4-452853762e84\") " Nov 25 10:07:16 crc kubenswrapper[4769]: I1125 10:07:16.802070 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c9253d8a-ffc1-4c6e-9df4-452853762e84-scripts\") pod \"c9253d8a-ffc1-4c6e-9df4-452853762e84\" (UID: \"c9253d8a-ffc1-4c6e-9df4-452853762e84\") " Nov 25 10:07:16 crc kubenswrapper[4769]: I1125 10:07:16.802686 4769 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c9253d8a-ffc1-4c6e-9df4-452853762e84-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:16 crc kubenswrapper[4769]: I1125 10:07:16.817295 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c9253d8a-ffc1-4c6e-9df4-452853762e84-kube-api-access-26v5b" (OuterVolumeSpecName: "kube-api-access-26v5b") pod "c9253d8a-ffc1-4c6e-9df4-452853762e84" (UID: "c9253d8a-ffc1-4c6e-9df4-452853762e84"). InnerVolumeSpecName "kube-api-access-26v5b". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:07:16 crc kubenswrapper[4769]: I1125 10:07:16.817406 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9253d8a-ffc1-4c6e-9df4-452853762e84-scripts" (OuterVolumeSpecName: "scripts") pod "c9253d8a-ffc1-4c6e-9df4-452853762e84" (UID: "c9253d8a-ffc1-4c6e-9df4-452853762e84"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:16 crc kubenswrapper[4769]: I1125 10:07:16.817937 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9253d8a-ffc1-4c6e-9df4-452853762e84-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "c9253d8a-ffc1-4c6e-9df4-452853762e84" (UID: "c9253d8a-ffc1-4c6e-9df4-452853762e84"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:16 crc kubenswrapper[4769]: I1125 10:07:16.900053 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9253d8a-ffc1-4c6e-9df4-452853762e84-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c9253d8a-ffc1-4c6e-9df4-452853762e84" (UID: "c9253d8a-ffc1-4c6e-9df4-452853762e84"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:16 crc kubenswrapper[4769]: I1125 10:07:16.906220 4769 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c9253d8a-ffc1-4c6e-9df4-452853762e84-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:16 crc kubenswrapper[4769]: I1125 10:07:16.906252 4769 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c9253d8a-ffc1-4c6e-9df4-452853762e84-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:16 crc kubenswrapper[4769]: I1125 10:07:16.906264 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9253d8a-ffc1-4c6e-9df4-452853762e84-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:16 crc kubenswrapper[4769]: I1125 10:07:16.906275 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-26v5b\" (UniqueName: \"kubernetes.io/projected/c9253d8a-ffc1-4c6e-9df4-452853762e84-kube-api-access-26v5b\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:16 crc kubenswrapper[4769]: I1125 10:07:16.947703 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9253d8a-ffc1-4c6e-9df4-452853762e84-config-data" (OuterVolumeSpecName: "config-data") pod "c9253d8a-ffc1-4c6e-9df4-452853762e84" (UID: "c9253d8a-ffc1-4c6e-9df4-452853762e84"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:17 crc kubenswrapper[4769]: I1125 10:07:17.010338 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9253d8a-ffc1-4c6e-9df4-452853762e84-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:17 crc kubenswrapper[4769]: I1125 10:07:17.152434 4769 generic.go:334] "Generic (PLEG): container finished" podID="c9253d8a-ffc1-4c6e-9df4-452853762e84" containerID="fe98056db3420e8b14c15bdae1cd4d1b1fe532e85143ff444c7c9dd039b259b5" exitCode=0 Nov 25 10:07:17 crc kubenswrapper[4769]: I1125 10:07:17.152619 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 10:07:17 crc kubenswrapper[4769]: I1125 10:07:17.152615 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"c9253d8a-ffc1-4c6e-9df4-452853762e84","Type":"ContainerDied","Data":"fe98056db3420e8b14c15bdae1cd4d1b1fe532e85143ff444c7c9dd039b259b5"} Nov 25 10:07:17 crc kubenswrapper[4769]: I1125 10:07:17.152720 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"c9253d8a-ffc1-4c6e-9df4-452853762e84","Type":"ContainerDied","Data":"f5fb1f027b362a0fe0b3a741a690f3949c0fffaa135997e90e4e9a0cc6934ad9"} Nov 25 10:07:17 crc kubenswrapper[4769]: I1125 10:07:17.152778 4769 scope.go:117] "RemoveContainer" containerID="ff2cb8a098c033d5c7b8c94de593f7027140c7453006c67ba690f83e6c9565e1" Nov 25 10:07:17 crc kubenswrapper[4769]: I1125 10:07:17.194566 4769 scope.go:117] "RemoveContainer" containerID="fe98056db3420e8b14c15bdae1cd4d1b1fe532e85143ff444c7c9dd039b259b5" Nov 25 10:07:17 crc kubenswrapper[4769]: I1125 10:07:17.215200 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 10:07:17 crc kubenswrapper[4769]: I1125 10:07:17.231914 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 10:07:17 crc kubenswrapper[4769]: I1125 10:07:17.254126 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 10:07:17 crc kubenswrapper[4769]: E1125 10:07:17.254805 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9253d8a-ffc1-4c6e-9df4-452853762e84" containerName="cinder-scheduler" Nov 25 10:07:17 crc kubenswrapper[4769]: I1125 10:07:17.254829 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9253d8a-ffc1-4c6e-9df4-452853762e84" containerName="cinder-scheduler" Nov 25 10:07:17 crc kubenswrapper[4769]: E1125 10:07:17.254846 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9253d8a-ffc1-4c6e-9df4-452853762e84" containerName="probe" Nov 25 10:07:17 crc kubenswrapper[4769]: I1125 10:07:17.254863 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9253d8a-ffc1-4c6e-9df4-452853762e84" containerName="probe" Nov 25 10:07:17 crc kubenswrapper[4769]: E1125 10:07:17.254873 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42db85a3-0c1c-416f-b1a0-857c750bb2c8" containerName="dnsmasq-dns" Nov 25 10:07:17 crc kubenswrapper[4769]: I1125 10:07:17.254880 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="42db85a3-0c1c-416f-b1a0-857c750bb2c8" containerName="dnsmasq-dns" Nov 25 10:07:17 crc kubenswrapper[4769]: E1125 10:07:17.254912 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42db85a3-0c1c-416f-b1a0-857c750bb2c8" containerName="init" Nov 25 10:07:17 crc kubenswrapper[4769]: I1125 10:07:17.254919 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="42db85a3-0c1c-416f-b1a0-857c750bb2c8" containerName="init" Nov 25 10:07:17 crc kubenswrapper[4769]: I1125 10:07:17.255194 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="42db85a3-0c1c-416f-b1a0-857c750bb2c8" containerName="dnsmasq-dns" Nov 25 10:07:17 crc kubenswrapper[4769]: I1125 10:07:17.255217 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9253d8a-ffc1-4c6e-9df4-452853762e84" containerName="cinder-scheduler" Nov 25 10:07:17 crc kubenswrapper[4769]: I1125 10:07:17.255233 4769 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="c9253d8a-ffc1-4c6e-9df4-452853762e84" containerName="probe" Nov 25 10:07:17 crc kubenswrapper[4769]: I1125 10:07:17.256571 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 10:07:17 crc kubenswrapper[4769]: I1125 10:07:17.270770 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 25 10:07:17 crc kubenswrapper[4769]: I1125 10:07:17.283140 4769 scope.go:117] "RemoveContainer" containerID="ff2cb8a098c033d5c7b8c94de593f7027140c7453006c67ba690f83e6c9565e1" Nov 25 10:07:17 crc kubenswrapper[4769]: E1125 10:07:17.283953 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ff2cb8a098c033d5c7b8c94de593f7027140c7453006c67ba690f83e6c9565e1\": container with ID starting with ff2cb8a098c033d5c7b8c94de593f7027140c7453006c67ba690f83e6c9565e1 not found: ID does not exist" containerID="ff2cb8a098c033d5c7b8c94de593f7027140c7453006c67ba690f83e6c9565e1" Nov 25 10:07:17 crc kubenswrapper[4769]: I1125 10:07:17.284128 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff2cb8a098c033d5c7b8c94de593f7027140c7453006c67ba690f83e6c9565e1"} err="failed to get container status \"ff2cb8a098c033d5c7b8c94de593f7027140c7453006c67ba690f83e6c9565e1\": rpc error: code = NotFound desc = could not find container \"ff2cb8a098c033d5c7b8c94de593f7027140c7453006c67ba690f83e6c9565e1\": container with ID starting with ff2cb8a098c033d5c7b8c94de593f7027140c7453006c67ba690f83e6c9565e1 not found: ID does not exist" Nov 25 10:07:17 crc kubenswrapper[4769]: I1125 10:07:17.284159 4769 scope.go:117] "RemoveContainer" containerID="fe98056db3420e8b14c15bdae1cd4d1b1fe532e85143ff444c7c9dd039b259b5" Nov 25 10:07:17 crc kubenswrapper[4769]: E1125 10:07:17.290645 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fe98056db3420e8b14c15bdae1cd4d1b1fe532e85143ff444c7c9dd039b259b5\": container with ID starting with fe98056db3420e8b14c15bdae1cd4d1b1fe532e85143ff444c7c9dd039b259b5 not found: ID does not exist" containerID="fe98056db3420e8b14c15bdae1cd4d1b1fe532e85143ff444c7c9dd039b259b5" Nov 25 10:07:17 crc kubenswrapper[4769]: I1125 10:07:17.290695 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe98056db3420e8b14c15bdae1cd4d1b1fe532e85143ff444c7c9dd039b259b5"} err="failed to get container status \"fe98056db3420e8b14c15bdae1cd4d1b1fe532e85143ff444c7c9dd039b259b5\": rpc error: code = NotFound desc = could not find container \"fe98056db3420e8b14c15bdae1cd4d1b1fe532e85143ff444c7c9dd039b259b5\": container with ID starting with fe98056db3420e8b14c15bdae1cd4d1b1fe532e85143ff444c7c9dd039b259b5 not found: ID does not exist" Nov 25 10:07:17 crc kubenswrapper[4769]: I1125 10:07:17.307522 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 10:07:17 crc kubenswrapper[4769]: I1125 10:07:17.399083 4769 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","pod175271fe-6677-49b5-b497-c45ef1816fb7"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort pod175271fe-6677-49b5-b497-c45ef1816fb7] : Timed out while waiting for systemd to remove kubepods-besteffort-pod175271fe_6677_49b5_b497_c45ef1816fb7.slice" Nov 25 10:07:17 crc kubenswrapper[4769]: I1125 10:07:17.420492 4769 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/18a4a142-bb1b-4e44-9110-6a6e15b86b0d-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"18a4a142-bb1b-4e44-9110-6a6e15b86b0d\") " pod="openstack/cinder-scheduler-0" Nov 25 10:07:17 crc kubenswrapper[4769]: I1125 10:07:17.422162 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18a4a142-bb1b-4e44-9110-6a6e15b86b0d-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"18a4a142-bb1b-4e44-9110-6a6e15b86b0d\") " pod="openstack/cinder-scheduler-0" Nov 25 10:07:17 crc kubenswrapper[4769]: I1125 10:07:17.422244 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/18a4a142-bb1b-4e44-9110-6a6e15b86b0d-scripts\") pod \"cinder-scheduler-0\" (UID: \"18a4a142-bb1b-4e44-9110-6a6e15b86b0d\") " pod="openstack/cinder-scheduler-0" Nov 25 10:07:17 crc kubenswrapper[4769]: I1125 10:07:17.422306 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18a4a142-bb1b-4e44-9110-6a6e15b86b0d-config-data\") pod \"cinder-scheduler-0\" (UID: \"18a4a142-bb1b-4e44-9110-6a6e15b86b0d\") " pod="openstack/cinder-scheduler-0" Nov 25 10:07:17 crc kubenswrapper[4769]: I1125 10:07:17.422525 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/18a4a142-bb1b-4e44-9110-6a6e15b86b0d-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"18a4a142-bb1b-4e44-9110-6a6e15b86b0d\") " pod="openstack/cinder-scheduler-0" Nov 25 10:07:17 crc kubenswrapper[4769]: I1125 10:07:17.422661 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7ntqc\" (UniqueName: \"kubernetes.io/projected/18a4a142-bb1b-4e44-9110-6a6e15b86b0d-kube-api-access-7ntqc\") pod \"cinder-scheduler-0\" (UID: \"18a4a142-bb1b-4e44-9110-6a6e15b86b0d\") " pod="openstack/cinder-scheduler-0" Nov 25 10:07:17 crc kubenswrapper[4769]: I1125 10:07:17.525528 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7ntqc\" (UniqueName: \"kubernetes.io/projected/18a4a142-bb1b-4e44-9110-6a6e15b86b0d-kube-api-access-7ntqc\") pod \"cinder-scheduler-0\" (UID: \"18a4a142-bb1b-4e44-9110-6a6e15b86b0d\") " pod="openstack/cinder-scheduler-0" Nov 25 10:07:17 crc kubenswrapper[4769]: I1125 10:07:17.525631 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/18a4a142-bb1b-4e44-9110-6a6e15b86b0d-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"18a4a142-bb1b-4e44-9110-6a6e15b86b0d\") " pod="openstack/cinder-scheduler-0" Nov 25 10:07:17 crc kubenswrapper[4769]: I1125 10:07:17.525734 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18a4a142-bb1b-4e44-9110-6a6e15b86b0d-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"18a4a142-bb1b-4e44-9110-6a6e15b86b0d\") " pod="openstack/cinder-scheduler-0" Nov 25 10:07:17 crc kubenswrapper[4769]: I1125 10:07:17.525799 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/18a4a142-bb1b-4e44-9110-6a6e15b86b0d-scripts\") pod \"cinder-scheduler-0\" (UID: \"18a4a142-bb1b-4e44-9110-6a6e15b86b0d\") " pod="openstack/cinder-scheduler-0" Nov 25 10:07:17 crc kubenswrapper[4769]: I1125 10:07:17.525850 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18a4a142-bb1b-4e44-9110-6a6e15b86b0d-config-data\") pod \"cinder-scheduler-0\" (UID: \"18a4a142-bb1b-4e44-9110-6a6e15b86b0d\") " pod="openstack/cinder-scheduler-0" Nov 25 10:07:17 crc kubenswrapper[4769]: I1125 10:07:17.525920 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/18a4a142-bb1b-4e44-9110-6a6e15b86b0d-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"18a4a142-bb1b-4e44-9110-6a6e15b86b0d\") " pod="openstack/cinder-scheduler-0" Nov 25 10:07:17 crc kubenswrapper[4769]: I1125 10:07:17.526114 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/18a4a142-bb1b-4e44-9110-6a6e15b86b0d-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"18a4a142-bb1b-4e44-9110-6a6e15b86b0d\") " pod="openstack/cinder-scheduler-0" Nov 25 10:07:17 crc kubenswrapper[4769]: I1125 10:07:17.532245 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/18a4a142-bb1b-4e44-9110-6a6e15b86b0d-scripts\") pod \"cinder-scheduler-0\" (UID: \"18a4a142-bb1b-4e44-9110-6a6e15b86b0d\") " pod="openstack/cinder-scheduler-0" Nov 25 10:07:17 crc kubenswrapper[4769]: I1125 10:07:17.532681 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/18a4a142-bb1b-4e44-9110-6a6e15b86b0d-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"18a4a142-bb1b-4e44-9110-6a6e15b86b0d\") " pod="openstack/cinder-scheduler-0" Nov 25 10:07:17 crc kubenswrapper[4769]: I1125 10:07:17.536619 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18a4a142-bb1b-4e44-9110-6a6e15b86b0d-config-data\") pod \"cinder-scheduler-0\" (UID: \"18a4a142-bb1b-4e44-9110-6a6e15b86b0d\") " pod="openstack/cinder-scheduler-0" Nov 25 10:07:17 crc kubenswrapper[4769]: I1125 10:07:17.542156 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18a4a142-bb1b-4e44-9110-6a6e15b86b0d-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"18a4a142-bb1b-4e44-9110-6a6e15b86b0d\") " pod="openstack/cinder-scheduler-0" Nov 25 10:07:17 crc kubenswrapper[4769]: I1125 10:07:17.544760 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7ntqc\" (UniqueName: \"kubernetes.io/projected/18a4a142-bb1b-4e44-9110-6a6e15b86b0d-kube-api-access-7ntqc\") pod \"cinder-scheduler-0\" (UID: \"18a4a142-bb1b-4e44-9110-6a6e15b86b0d\") " pod="openstack/cinder-scheduler-0" Nov 25 10:07:17 crc kubenswrapper[4769]: I1125 10:07:17.607011 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 10:07:18 crc kubenswrapper[4769]: W1125 10:07:18.174201 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod18a4a142_bb1b_4e44_9110_6a6e15b86b0d.slice/crio-ff929a2804c3bca5392c50dafdd2991c06c0395d1063d9cca42d2951a29b55f4 WatchSource:0}: Error finding container ff929a2804c3bca5392c50dafdd2991c06c0395d1063d9cca42d2951a29b55f4: Status 404 returned error can't find the container with id ff929a2804c3bca5392c50dafdd2991c06c0395d1063d9cca42d2951a29b55f4 Nov 25 10:07:18 crc kubenswrapper[4769]: I1125 10:07:18.178592 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 10:07:18 crc kubenswrapper[4769]: I1125 10:07:18.306717 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c9253d8a-ffc1-4c6e-9df4-452853762e84" path="/var/lib/kubelet/pods/c9253d8a-ffc1-4c6e-9df4-452853762e84/volumes" Nov 25 10:07:19 crc kubenswrapper[4769]: I1125 10:07:19.186868 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"18a4a142-bb1b-4e44-9110-6a6e15b86b0d","Type":"ContainerStarted","Data":"7efe33511fbdd50d518a9752921c8dc97ebbb6fbae7b5925d6b3de5b3cf3f76d"} Nov 25 10:07:19 crc kubenswrapper[4769]: I1125 10:07:19.187312 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"18a4a142-bb1b-4e44-9110-6a6e15b86b0d","Type":"ContainerStarted","Data":"ff929a2804c3bca5392c50dafdd2991c06c0395d1063d9cca42d2951a29b55f4"} Nov 25 10:07:19 crc kubenswrapper[4769]: I1125 10:07:19.624318 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-68856897d4-664jr" Nov 25 10:07:19 crc kubenswrapper[4769]: I1125 10:07:19.624775 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-68856897d4-664jr" Nov 25 10:07:20 crc kubenswrapper[4769]: I1125 10:07:20.199847 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"18a4a142-bb1b-4e44-9110-6a6e15b86b0d","Type":"ContainerStarted","Data":"ab597c597774366cc30c85076d9d2b579e8e080020332e3885da0b02cc82cac1"} Nov 25 10:07:20 crc kubenswrapper[4769]: I1125 10:07:20.245225 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.245180435 podStartE2EDuration="3.245180435s" podCreationTimestamp="2025-11-25 10:07:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:07:20.232059415 +0000 UTC m=+1388.817031728" watchObservedRunningTime="2025-11-25 10:07:20.245180435 +0000 UTC m=+1388.830152748" Nov 25 10:07:20 crc kubenswrapper[4769]: I1125 10:07:20.709463 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-d4dbd5b55-g2prp" Nov 25 10:07:22 crc kubenswrapper[4769]: I1125 10:07:22.609766 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 25 10:07:23 crc kubenswrapper[4769]: I1125 10:07:23.072674 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.035593 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 
10:07:25.038453 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.040512 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.040713 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-w78jc" Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.042041 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.052033 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.167497 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be068e15-9a8b-472c-9a66-8ee06cf2491f-combined-ca-bundle\") pod \"openstackclient\" (UID: \"be068e15-9a8b-472c-9a66-8ee06cf2491f\") " pod="openstack/openstackclient" Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.167542 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/be068e15-9a8b-472c-9a66-8ee06cf2491f-openstack-config\") pod \"openstackclient\" (UID: \"be068e15-9a8b-472c-9a66-8ee06cf2491f\") " pod="openstack/openstackclient" Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.167635 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/be068e15-9a8b-472c-9a66-8ee06cf2491f-openstack-config-secret\") pod \"openstackclient\" (UID: \"be068e15-9a8b-472c-9a66-8ee06cf2491f\") " pod="openstack/openstackclient" Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.167684 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nplwk\" (UniqueName: \"kubernetes.io/projected/be068e15-9a8b-472c-9a66-8ee06cf2491f-kube-api-access-nplwk\") pod \"openstackclient\" (UID: \"be068e15-9a8b-472c-9a66-8ee06cf2491f\") " pod="openstack/openstackclient" Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.231189 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-657bc78fc7-ccwr8"] Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.257431 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-657bc78fc7-ccwr8"] Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.257776 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-657bc78fc7-ccwr8" Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.269878 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.272752 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.272905 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.287427 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be068e15-9a8b-472c-9a66-8ee06cf2491f-combined-ca-bundle\") pod \"openstackclient\" (UID: \"be068e15-9a8b-472c-9a66-8ee06cf2491f\") " pod="openstack/openstackclient" Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.287479 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/be068e15-9a8b-472c-9a66-8ee06cf2491f-openstack-config\") pod \"openstackclient\" (UID: \"be068e15-9a8b-472c-9a66-8ee06cf2491f\") " pod="openstack/openstackclient" Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.287863 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/be068e15-9a8b-472c-9a66-8ee06cf2491f-openstack-config-secret\") pod \"openstackclient\" (UID: \"be068e15-9a8b-472c-9a66-8ee06cf2491f\") " pod="openstack/openstackclient" Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.288068 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nplwk\" (UniqueName: \"kubernetes.io/projected/be068e15-9a8b-472c-9a66-8ee06cf2491f-kube-api-access-nplwk\") pod \"openstackclient\" (UID: \"be068e15-9a8b-472c-9a66-8ee06cf2491f\") " pod="openstack/openstackclient" Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.289424 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/be068e15-9a8b-472c-9a66-8ee06cf2491f-openstack-config\") pod \"openstackclient\" (UID: \"be068e15-9a8b-472c-9a66-8ee06cf2491f\") " pod="openstack/openstackclient" Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.377071 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be068e15-9a8b-472c-9a66-8ee06cf2491f-combined-ca-bundle\") pod \"openstackclient\" (UID: \"be068e15-9a8b-472c-9a66-8ee06cf2491f\") " pod="openstack/openstackclient" Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.377103 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nplwk\" (UniqueName: \"kubernetes.io/projected/be068e15-9a8b-472c-9a66-8ee06cf2491f-kube-api-access-nplwk\") pod \"openstackclient\" (UID: \"be068e15-9a8b-472c-9a66-8ee06cf2491f\") " pod="openstack/openstackclient" Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.389936 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lqrrm\" (UniqueName: \"kubernetes.io/projected/5aee4062-7d9d-44f0-a07c-6f0704946803-kube-api-access-lqrrm\") pod \"swift-proxy-657bc78fc7-ccwr8\" (UID: \"5aee4062-7d9d-44f0-a07c-6f0704946803\") " 
pod="openstack/swift-proxy-657bc78fc7-ccwr8" Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.390045 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/5aee4062-7d9d-44f0-a07c-6f0704946803-etc-swift\") pod \"swift-proxy-657bc78fc7-ccwr8\" (UID: \"5aee4062-7d9d-44f0-a07c-6f0704946803\") " pod="openstack/swift-proxy-657bc78fc7-ccwr8" Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.390090 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5aee4062-7d9d-44f0-a07c-6f0704946803-config-data\") pod \"swift-proxy-657bc78fc7-ccwr8\" (UID: \"5aee4062-7d9d-44f0-a07c-6f0704946803\") " pod="openstack/swift-proxy-657bc78fc7-ccwr8" Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.390111 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5aee4062-7d9d-44f0-a07c-6f0704946803-internal-tls-certs\") pod \"swift-proxy-657bc78fc7-ccwr8\" (UID: \"5aee4062-7d9d-44f0-a07c-6f0704946803\") " pod="openstack/swift-proxy-657bc78fc7-ccwr8" Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.390169 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5aee4062-7d9d-44f0-a07c-6f0704946803-public-tls-certs\") pod \"swift-proxy-657bc78fc7-ccwr8\" (UID: \"5aee4062-7d9d-44f0-a07c-6f0704946803\") " pod="openstack/swift-proxy-657bc78fc7-ccwr8" Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.390198 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5aee4062-7d9d-44f0-a07c-6f0704946803-log-httpd\") pod \"swift-proxy-657bc78fc7-ccwr8\" (UID: \"5aee4062-7d9d-44f0-a07c-6f0704946803\") " pod="openstack/swift-proxy-657bc78fc7-ccwr8" Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.390252 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5aee4062-7d9d-44f0-a07c-6f0704946803-combined-ca-bundle\") pod \"swift-proxy-657bc78fc7-ccwr8\" (UID: \"5aee4062-7d9d-44f0-a07c-6f0704946803\") " pod="openstack/swift-proxy-657bc78fc7-ccwr8" Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.390272 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5aee4062-7d9d-44f0-a07c-6f0704946803-run-httpd\") pod \"swift-proxy-657bc78fc7-ccwr8\" (UID: \"5aee4062-7d9d-44f0-a07c-6f0704946803\") " pod="openstack/swift-proxy-657bc78fc7-ccwr8" Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.397568 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/be068e15-9a8b-472c-9a66-8ee06cf2491f-openstack-config-secret\") pod \"openstackclient\" (UID: \"be068e15-9a8b-472c-9a66-8ee06cf2491f\") " pod="openstack/openstackclient" Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.492123 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5aee4062-7d9d-44f0-a07c-6f0704946803-combined-ca-bundle\") pod \"swift-proxy-657bc78fc7-ccwr8\" (UID: 
\"5aee4062-7d9d-44f0-a07c-6f0704946803\") " pod="openstack/swift-proxy-657bc78fc7-ccwr8" Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.492233 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5aee4062-7d9d-44f0-a07c-6f0704946803-run-httpd\") pod \"swift-proxy-657bc78fc7-ccwr8\" (UID: \"5aee4062-7d9d-44f0-a07c-6f0704946803\") " pod="openstack/swift-proxy-657bc78fc7-ccwr8" Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.492864 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5aee4062-7d9d-44f0-a07c-6f0704946803-run-httpd\") pod \"swift-proxy-657bc78fc7-ccwr8\" (UID: \"5aee4062-7d9d-44f0-a07c-6f0704946803\") " pod="openstack/swift-proxy-657bc78fc7-ccwr8" Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.492931 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lqrrm\" (UniqueName: \"kubernetes.io/projected/5aee4062-7d9d-44f0-a07c-6f0704946803-kube-api-access-lqrrm\") pod \"swift-proxy-657bc78fc7-ccwr8\" (UID: \"5aee4062-7d9d-44f0-a07c-6f0704946803\") " pod="openstack/swift-proxy-657bc78fc7-ccwr8" Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.493008 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/5aee4062-7d9d-44f0-a07c-6f0704946803-etc-swift\") pod \"swift-proxy-657bc78fc7-ccwr8\" (UID: \"5aee4062-7d9d-44f0-a07c-6f0704946803\") " pod="openstack/swift-proxy-657bc78fc7-ccwr8" Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.493065 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5aee4062-7d9d-44f0-a07c-6f0704946803-config-data\") pod \"swift-proxy-657bc78fc7-ccwr8\" (UID: \"5aee4062-7d9d-44f0-a07c-6f0704946803\") " pod="openstack/swift-proxy-657bc78fc7-ccwr8" Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.493095 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5aee4062-7d9d-44f0-a07c-6f0704946803-internal-tls-certs\") pod \"swift-proxy-657bc78fc7-ccwr8\" (UID: \"5aee4062-7d9d-44f0-a07c-6f0704946803\") " pod="openstack/swift-proxy-657bc78fc7-ccwr8" Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.493180 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5aee4062-7d9d-44f0-a07c-6f0704946803-public-tls-certs\") pod \"swift-proxy-657bc78fc7-ccwr8\" (UID: \"5aee4062-7d9d-44f0-a07c-6f0704946803\") " pod="openstack/swift-proxy-657bc78fc7-ccwr8" Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.493621 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5aee4062-7d9d-44f0-a07c-6f0704946803-log-httpd\") pod \"swift-proxy-657bc78fc7-ccwr8\" (UID: \"5aee4062-7d9d-44f0-a07c-6f0704946803\") " pod="openstack/swift-proxy-657bc78fc7-ccwr8" Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.494302 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5aee4062-7d9d-44f0-a07c-6f0704946803-log-httpd\") pod \"swift-proxy-657bc78fc7-ccwr8\" (UID: \"5aee4062-7d9d-44f0-a07c-6f0704946803\") " pod="openstack/swift-proxy-657bc78fc7-ccwr8" Nov 25 10:07:25 crc 
kubenswrapper[4769]: I1125 10:07:25.498178 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5aee4062-7d9d-44f0-a07c-6f0704946803-internal-tls-certs\") pod \"swift-proxy-657bc78fc7-ccwr8\" (UID: \"5aee4062-7d9d-44f0-a07c-6f0704946803\") " pod="openstack/swift-proxy-657bc78fc7-ccwr8" Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.498198 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5aee4062-7d9d-44f0-a07c-6f0704946803-combined-ca-bundle\") pod \"swift-proxy-657bc78fc7-ccwr8\" (UID: \"5aee4062-7d9d-44f0-a07c-6f0704946803\") " pod="openstack/swift-proxy-657bc78fc7-ccwr8" Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.499873 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5aee4062-7d9d-44f0-a07c-6f0704946803-public-tls-certs\") pod \"swift-proxy-657bc78fc7-ccwr8\" (UID: \"5aee4062-7d9d-44f0-a07c-6f0704946803\") " pod="openstack/swift-proxy-657bc78fc7-ccwr8" Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.502798 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/5aee4062-7d9d-44f0-a07c-6f0704946803-etc-swift\") pod \"swift-proxy-657bc78fc7-ccwr8\" (UID: \"5aee4062-7d9d-44f0-a07c-6f0704946803\") " pod="openstack/swift-proxy-657bc78fc7-ccwr8" Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.522249 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5aee4062-7d9d-44f0-a07c-6f0704946803-config-data\") pod \"swift-proxy-657bc78fc7-ccwr8\" (UID: \"5aee4062-7d9d-44f0-a07c-6f0704946803\") " pod="openstack/swift-proxy-657bc78fc7-ccwr8" Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.532005 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lqrrm\" (UniqueName: \"kubernetes.io/projected/5aee4062-7d9d-44f0-a07c-6f0704946803-kube-api-access-lqrrm\") pod \"swift-proxy-657bc78fc7-ccwr8\" (UID: \"5aee4062-7d9d-44f0-a07c-6f0704946803\") " pod="openstack/swift-proxy-657bc78fc7-ccwr8" Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.664785 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 25 10:07:25 crc kubenswrapper[4769]: I1125 10:07:25.753456 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-657bc78fc7-ccwr8" Nov 25 10:07:26 crc kubenswrapper[4769]: I1125 10:07:26.260818 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 25 10:07:26 crc kubenswrapper[4769]: W1125 10:07:26.268314 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbe068e15_9a8b_472c_9a66_8ee06cf2491f.slice/crio-713848ec6a2a7669a04139fda9b7deeb5df6413459ed1230e62c6b96a667b01b WatchSource:0}: Error finding container 713848ec6a2a7669a04139fda9b7deeb5df6413459ed1230e62c6b96a667b01b: Status 404 returned error can't find the container with id 713848ec6a2a7669a04139fda9b7deeb5df6413459ed1230e62c6b96a667b01b Nov 25 10:07:26 crc kubenswrapper[4769]: I1125 10:07:26.298826 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"be068e15-9a8b-472c-9a66-8ee06cf2491f","Type":"ContainerStarted","Data":"713848ec6a2a7669a04139fda9b7deeb5df6413459ed1230e62c6b96a667b01b"} Nov 25 10:07:26 crc kubenswrapper[4769]: I1125 10:07:26.456814 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-657bc78fc7-ccwr8"] Nov 25 10:07:26 crc kubenswrapper[4769]: W1125 10:07:26.459884 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5aee4062_7d9d_44f0_a07c_6f0704946803.slice/crio-f3a1bf3b761734c752ea2b9567644ab9a6b3f1cd230ebfdc9fec95229b1cdfed WatchSource:0}: Error finding container f3a1bf3b761734c752ea2b9567644ab9a6b3f1cd230ebfdc9fec95229b1cdfed: Status 404 returned error can't find the container with id f3a1bf3b761734c752ea2b9567644ab9a6b3f1cd230ebfdc9fec95229b1cdfed Nov 25 10:07:27 crc kubenswrapper[4769]: I1125 10:07:27.318672 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-657bc78fc7-ccwr8" event={"ID":"5aee4062-7d9d-44f0-a07c-6f0704946803","Type":"ContainerStarted","Data":"3437ab173ffbebefb0b5fe162573d0e57ffc055b43a209b74304cdccd7072f56"} Nov 25 10:07:27 crc kubenswrapper[4769]: I1125 10:07:27.319717 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-657bc78fc7-ccwr8" event={"ID":"5aee4062-7d9d-44f0-a07c-6f0704946803","Type":"ContainerStarted","Data":"10c7546907a0cf70ae9963df5cecb5998737cb25e3114af4b5537416b0db82eb"} Nov 25 10:07:27 crc kubenswrapper[4769]: I1125 10:07:27.319745 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-657bc78fc7-ccwr8" Nov 25 10:07:27 crc kubenswrapper[4769]: I1125 10:07:27.319759 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-657bc78fc7-ccwr8" event={"ID":"5aee4062-7d9d-44f0-a07c-6f0704946803","Type":"ContainerStarted","Data":"f3a1bf3b761734c752ea2b9567644ab9a6b3f1cd230ebfdc9fec95229b1cdfed"} Nov 25 10:07:27 crc kubenswrapper[4769]: I1125 10:07:27.371699 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-657bc78fc7-ccwr8" podStartSLOduration=2.371673493 podStartE2EDuration="2.371673493s" podCreationTimestamp="2025-11-25 10:07:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:07:27.362652543 +0000 UTC m=+1395.947624856" watchObservedRunningTime="2025-11-25 10:07:27.371673493 +0000 UTC m=+1395.956645806" Nov 25 10:07:27 crc kubenswrapper[4769]: I1125 10:07:27.917121 4769 kubelet.go:2542] 
"SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 25 10:07:28 crc kubenswrapper[4769]: I1125 10:07:28.332846 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-657bc78fc7-ccwr8" Nov 25 10:07:28 crc kubenswrapper[4769]: I1125 10:07:28.596789 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:07:28 crc kubenswrapper[4769]: I1125 10:07:28.597218 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f19d47b9-3be8-4525-adcf-5cf8ec05ed68" containerName="ceilometer-central-agent" containerID="cri-o://03b0818632efff47884190f830bae5e6400f5396ba923330493b774a2d0a72e4" gracePeriod=30 Nov 25 10:07:28 crc kubenswrapper[4769]: I1125 10:07:28.597759 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f19d47b9-3be8-4525-adcf-5cf8ec05ed68" containerName="proxy-httpd" containerID="cri-o://be21b5b052fce5850e61dce4e3173a5984c07e09a17692588190bcc6276bf793" gracePeriod=30 Nov 25 10:07:28 crc kubenswrapper[4769]: I1125 10:07:28.597849 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f19d47b9-3be8-4525-adcf-5cf8ec05ed68" containerName="sg-core" containerID="cri-o://84c73fc52ebe48575f4c128ea8e61b39efa3c4d7a2a578209f5a53dfd82a7aab" gracePeriod=30 Nov 25 10:07:28 crc kubenswrapper[4769]: I1125 10:07:28.597897 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f19d47b9-3be8-4525-adcf-5cf8ec05ed68" containerName="ceilometer-notification-agent" containerID="cri-o://fd2e4284ab582054817765d1ec02bf285c80eee5ee5af43cc2bf06916fa6b7a5" gracePeriod=30 Nov 25 10:07:28 crc kubenswrapper[4769]: I1125 10:07:28.635767 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.175572 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-7687d6c66-wfxhv"] Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.190148 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-7687d6c66-wfxhv" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.192891 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-sc4ww" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.193799 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-engine-config-data" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.196480 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.236126 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a8a1143-f843-4007-bdc4-095a5047ca69-config-data\") pod \"heat-engine-7687d6c66-wfxhv\" (UID: \"4a8a1143-f843-4007-bdc4-095a5047ca69\") " pod="openstack/heat-engine-7687d6c66-wfxhv" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.236209 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4a8a1143-f843-4007-bdc4-095a5047ca69-config-data-custom\") pod \"heat-engine-7687d6c66-wfxhv\" (UID: \"4a8a1143-f843-4007-bdc4-095a5047ca69\") " pod="openstack/heat-engine-7687d6c66-wfxhv" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.236250 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ps94w\" (UniqueName: \"kubernetes.io/projected/4a8a1143-f843-4007-bdc4-095a5047ca69-kube-api-access-ps94w\") pod \"heat-engine-7687d6c66-wfxhv\" (UID: \"4a8a1143-f843-4007-bdc4-095a5047ca69\") " pod="openstack/heat-engine-7687d6c66-wfxhv" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.236332 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a8a1143-f843-4007-bdc4-095a5047ca69-combined-ca-bundle\") pod \"heat-engine-7687d6c66-wfxhv\" (UID: \"4a8a1143-f843-4007-bdc4-095a5047ca69\") " pod="openstack/heat-engine-7687d6c66-wfxhv" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.263126 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-7687d6c66-wfxhv"] Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.339019 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a8a1143-f843-4007-bdc4-095a5047ca69-config-data\") pod \"heat-engine-7687d6c66-wfxhv\" (UID: \"4a8a1143-f843-4007-bdc4-095a5047ca69\") " pod="openstack/heat-engine-7687d6c66-wfxhv" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.339134 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4a8a1143-f843-4007-bdc4-095a5047ca69-config-data-custom\") pod \"heat-engine-7687d6c66-wfxhv\" (UID: \"4a8a1143-f843-4007-bdc4-095a5047ca69\") " pod="openstack/heat-engine-7687d6c66-wfxhv" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.339270 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ps94w\" (UniqueName: \"kubernetes.io/projected/4a8a1143-f843-4007-bdc4-095a5047ca69-kube-api-access-ps94w\") pod \"heat-engine-7687d6c66-wfxhv\" (UID: \"4a8a1143-f843-4007-bdc4-095a5047ca69\") " pod="openstack/heat-engine-7687d6c66-wfxhv" Nov 25 
10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.339376 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a8a1143-f843-4007-bdc4-095a5047ca69-combined-ca-bundle\") pod \"heat-engine-7687d6c66-wfxhv\" (UID: \"4a8a1143-f843-4007-bdc4-095a5047ca69\") " pod="openstack/heat-engine-7687d6c66-wfxhv" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.372043 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4a8a1143-f843-4007-bdc4-095a5047ca69-config-data-custom\") pod \"heat-engine-7687d6c66-wfxhv\" (UID: \"4a8a1143-f843-4007-bdc4-095a5047ca69\") " pod="openstack/heat-engine-7687d6c66-wfxhv" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.376444 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a8a1143-f843-4007-bdc4-095a5047ca69-config-data\") pod \"heat-engine-7687d6c66-wfxhv\" (UID: \"4a8a1143-f843-4007-bdc4-095a5047ca69\") " pod="openstack/heat-engine-7687d6c66-wfxhv" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.399122 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ps94w\" (UniqueName: \"kubernetes.io/projected/4a8a1143-f843-4007-bdc4-095a5047ca69-kube-api-access-ps94w\") pod \"heat-engine-7687d6c66-wfxhv\" (UID: \"4a8a1143-f843-4007-bdc4-095a5047ca69\") " pod="openstack/heat-engine-7687d6c66-wfxhv" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.399688 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a8a1143-f843-4007-bdc4-095a5047ca69-combined-ca-bundle\") pod \"heat-engine-7687d6c66-wfxhv\" (UID: \"4a8a1143-f843-4007-bdc4-095a5047ca69\") " pod="openstack/heat-engine-7687d6c66-wfxhv" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.506446 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-66b978bc64-9578t"] Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.508900 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-66b978bc64-9578t" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.515255 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-cfnapi-config-data" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.519204 4769 generic.go:334] "Generic (PLEG): container finished" podID="f19d47b9-3be8-4525-adcf-5cf8ec05ed68" containerID="be21b5b052fce5850e61dce4e3173a5984c07e09a17692588190bcc6276bf793" exitCode=0 Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.519234 4769 generic.go:334] "Generic (PLEG): container finished" podID="f19d47b9-3be8-4525-adcf-5cf8ec05ed68" containerID="84c73fc52ebe48575f4c128ea8e61b39efa3c4d7a2a578209f5a53dfd82a7aab" exitCode=2 Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.519244 4769 generic.go:334] "Generic (PLEG): container finished" podID="f19d47b9-3be8-4525-adcf-5cf8ec05ed68" containerID="03b0818632efff47884190f830bae5e6400f5396ba923330493b774a2d0a72e4" exitCode=0 Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.520486 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f19d47b9-3be8-4525-adcf-5cf8ec05ed68","Type":"ContainerDied","Data":"be21b5b052fce5850e61dce4e3173a5984c07e09a17692588190bcc6276bf793"} Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.520521 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f19d47b9-3be8-4525-adcf-5cf8ec05ed68","Type":"ContainerDied","Data":"84c73fc52ebe48575f4c128ea8e61b39efa3c4d7a2a578209f5a53dfd82a7aab"} Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.520533 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f19d47b9-3be8-4525-adcf-5cf8ec05ed68","Type":"ContainerDied","Data":"03b0818632efff47884190f830bae5e6400f5396ba923330493b774a2d0a72e4"} Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.570928 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-tq8xm"] Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.609098 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f6bc4c6c9-tq8xm" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.611082 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-7687d6c66-wfxhv" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.670839 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a8c0623e-d0b4-4495-a44c-07a1331dcc0f-dns-swift-storage-0\") pod \"dnsmasq-dns-f6bc4c6c9-tq8xm\" (UID: \"a8c0623e-d0b4-4495-a44c-07a1331dcc0f\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-tq8xm" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.670882 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a8c0623e-d0b4-4495-a44c-07a1331dcc0f-ovsdbserver-sb\") pod \"dnsmasq-dns-f6bc4c6c9-tq8xm\" (UID: \"a8c0623e-d0b4-4495-a44c-07a1331dcc0f\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-tq8xm" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.670906 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a8c0623e-d0b4-4495-a44c-07a1331dcc0f-ovsdbserver-nb\") pod \"dnsmasq-dns-f6bc4c6c9-tq8xm\" (UID: \"a8c0623e-d0b4-4495-a44c-07a1331dcc0f\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-tq8xm" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.670941 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/344a181f-8505-44bd-a107-481eb1e382da-combined-ca-bundle\") pod \"heat-cfnapi-66b978bc64-9578t\" (UID: \"344a181f-8505-44bd-a107-481eb1e382da\") " pod="openstack/heat-cfnapi-66b978bc64-9578t" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.671036 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/344a181f-8505-44bd-a107-481eb1e382da-config-data\") pod \"heat-cfnapi-66b978bc64-9578t\" (UID: \"344a181f-8505-44bd-a107-481eb1e382da\") " pod="openstack/heat-cfnapi-66b978bc64-9578t" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.671079 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-grtd6\" (UniqueName: \"kubernetes.io/projected/a8c0623e-d0b4-4495-a44c-07a1331dcc0f-kube-api-access-grtd6\") pod \"dnsmasq-dns-f6bc4c6c9-tq8xm\" (UID: \"a8c0623e-d0b4-4495-a44c-07a1331dcc0f\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-tq8xm" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.671115 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a8c0623e-d0b4-4495-a44c-07a1331dcc0f-config\") pod \"dnsmasq-dns-f6bc4c6c9-tq8xm\" (UID: \"a8c0623e-d0b4-4495-a44c-07a1331dcc0f\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-tq8xm" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.671138 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/344a181f-8505-44bd-a107-481eb1e382da-config-data-custom\") pod \"heat-cfnapi-66b978bc64-9578t\" (UID: \"344a181f-8505-44bd-a107-481eb1e382da\") " pod="openstack/heat-cfnapi-66b978bc64-9578t" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.671171 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nnncd\" (UniqueName: 
\"kubernetes.io/projected/344a181f-8505-44bd-a107-481eb1e382da-kube-api-access-nnncd\") pod \"heat-cfnapi-66b978bc64-9578t\" (UID: \"344a181f-8505-44bd-a107-481eb1e382da\") " pod="openstack/heat-cfnapi-66b978bc64-9578t" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.671226 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a8c0623e-d0b4-4495-a44c-07a1331dcc0f-dns-svc\") pod \"dnsmasq-dns-f6bc4c6c9-tq8xm\" (UID: \"a8c0623e-d0b4-4495-a44c-07a1331dcc0f\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-tq8xm" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.684130 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-66b978bc64-9578t"] Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.696813 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-tq8xm"] Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.707225 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-86c99fcffb-tfxvs"] Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.709179 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-86c99fcffb-tfxvs" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.716230 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-api-config-data" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.723889 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-86c99fcffb-tfxvs"] Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.776758 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a8c0623e-d0b4-4495-a44c-07a1331dcc0f-dns-swift-storage-0\") pod \"dnsmasq-dns-f6bc4c6c9-tq8xm\" (UID: \"a8c0623e-d0b4-4495-a44c-07a1331dcc0f\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-tq8xm" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.776812 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a8c0623e-d0b4-4495-a44c-07a1331dcc0f-ovsdbserver-sb\") pod \"dnsmasq-dns-f6bc4c6c9-tq8xm\" (UID: \"a8c0623e-d0b4-4495-a44c-07a1331dcc0f\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-tq8xm" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.776843 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a8c0623e-d0b4-4495-a44c-07a1331dcc0f-ovsdbserver-nb\") pod \"dnsmasq-dns-f6bc4c6c9-tq8xm\" (UID: \"a8c0623e-d0b4-4495-a44c-07a1331dcc0f\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-tq8xm" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.776893 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/344a181f-8505-44bd-a107-481eb1e382da-combined-ca-bundle\") pod \"heat-cfnapi-66b978bc64-9578t\" (UID: \"344a181f-8505-44bd-a107-481eb1e382da\") " pod="openstack/heat-cfnapi-66b978bc64-9578t" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.776921 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/994253aa-3f23-4bce-839a-9193a22d976e-config-data-custom\") pod \"heat-api-86c99fcffb-tfxvs\" (UID: \"994253aa-3f23-4bce-839a-9193a22d976e\") " 
pod="openstack/heat-api-86c99fcffb-tfxvs" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.776956 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/344a181f-8505-44bd-a107-481eb1e382da-config-data\") pod \"heat-cfnapi-66b978bc64-9578t\" (UID: \"344a181f-8505-44bd-a107-481eb1e382da\") " pod="openstack/heat-cfnapi-66b978bc64-9578t" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.777004 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l9bb6\" (UniqueName: \"kubernetes.io/projected/994253aa-3f23-4bce-839a-9193a22d976e-kube-api-access-l9bb6\") pod \"heat-api-86c99fcffb-tfxvs\" (UID: \"994253aa-3f23-4bce-839a-9193a22d976e\") " pod="openstack/heat-api-86c99fcffb-tfxvs" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.777024 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/994253aa-3f23-4bce-839a-9193a22d976e-config-data\") pod \"heat-api-86c99fcffb-tfxvs\" (UID: \"994253aa-3f23-4bce-839a-9193a22d976e\") " pod="openstack/heat-api-86c99fcffb-tfxvs" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.777049 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-grtd6\" (UniqueName: \"kubernetes.io/projected/a8c0623e-d0b4-4495-a44c-07a1331dcc0f-kube-api-access-grtd6\") pod \"dnsmasq-dns-f6bc4c6c9-tq8xm\" (UID: \"a8c0623e-d0b4-4495-a44c-07a1331dcc0f\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-tq8xm" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.777093 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/994253aa-3f23-4bce-839a-9193a22d976e-combined-ca-bundle\") pod \"heat-api-86c99fcffb-tfxvs\" (UID: \"994253aa-3f23-4bce-839a-9193a22d976e\") " pod="openstack/heat-api-86c99fcffb-tfxvs" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.777113 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a8c0623e-d0b4-4495-a44c-07a1331dcc0f-config\") pod \"dnsmasq-dns-f6bc4c6c9-tq8xm\" (UID: \"a8c0623e-d0b4-4495-a44c-07a1331dcc0f\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-tq8xm" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.777140 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/344a181f-8505-44bd-a107-481eb1e382da-config-data-custom\") pod \"heat-cfnapi-66b978bc64-9578t\" (UID: \"344a181f-8505-44bd-a107-481eb1e382da\") " pod="openstack/heat-cfnapi-66b978bc64-9578t" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.777172 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nnncd\" (UniqueName: \"kubernetes.io/projected/344a181f-8505-44bd-a107-481eb1e382da-kube-api-access-nnncd\") pod \"heat-cfnapi-66b978bc64-9578t\" (UID: \"344a181f-8505-44bd-a107-481eb1e382da\") " pod="openstack/heat-cfnapi-66b978bc64-9578t" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.777241 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a8c0623e-d0b4-4495-a44c-07a1331dcc0f-dns-svc\") pod \"dnsmasq-dns-f6bc4c6c9-tq8xm\" (UID: \"a8c0623e-d0b4-4495-a44c-07a1331dcc0f\") " 
pod="openstack/dnsmasq-dns-f6bc4c6c9-tq8xm" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.778297 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a8c0623e-d0b4-4495-a44c-07a1331dcc0f-dns-svc\") pod \"dnsmasq-dns-f6bc4c6c9-tq8xm\" (UID: \"a8c0623e-d0b4-4495-a44c-07a1331dcc0f\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-tq8xm" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.778833 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a8c0623e-d0b4-4495-a44c-07a1331dcc0f-dns-swift-storage-0\") pod \"dnsmasq-dns-f6bc4c6c9-tq8xm\" (UID: \"a8c0623e-d0b4-4495-a44c-07a1331dcc0f\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-tq8xm" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.779244 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a8c0623e-d0b4-4495-a44c-07a1331dcc0f-config\") pod \"dnsmasq-dns-f6bc4c6c9-tq8xm\" (UID: \"a8c0623e-d0b4-4495-a44c-07a1331dcc0f\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-tq8xm" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.779523 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a8c0623e-d0b4-4495-a44c-07a1331dcc0f-ovsdbserver-sb\") pod \"dnsmasq-dns-f6bc4c6c9-tq8xm\" (UID: \"a8c0623e-d0b4-4495-a44c-07a1331dcc0f\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-tq8xm" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.780190 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a8c0623e-d0b4-4495-a44c-07a1331dcc0f-ovsdbserver-nb\") pod \"dnsmasq-dns-f6bc4c6c9-tq8xm\" (UID: \"a8c0623e-d0b4-4495-a44c-07a1331dcc0f\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-tq8xm" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.785660 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/344a181f-8505-44bd-a107-481eb1e382da-combined-ca-bundle\") pod \"heat-cfnapi-66b978bc64-9578t\" (UID: \"344a181f-8505-44bd-a107-481eb1e382da\") " pod="openstack/heat-cfnapi-66b978bc64-9578t" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.788725 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/344a181f-8505-44bd-a107-481eb1e382da-config-data\") pod \"heat-cfnapi-66b978bc64-9578t\" (UID: \"344a181f-8505-44bd-a107-481eb1e382da\") " pod="openstack/heat-cfnapi-66b978bc64-9578t" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.790170 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/344a181f-8505-44bd-a107-481eb1e382da-config-data-custom\") pod \"heat-cfnapi-66b978bc64-9578t\" (UID: \"344a181f-8505-44bd-a107-481eb1e382da\") " pod="openstack/heat-cfnapi-66b978bc64-9578t" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.811836 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nnncd\" (UniqueName: \"kubernetes.io/projected/344a181f-8505-44bd-a107-481eb1e382da-kube-api-access-nnncd\") pod \"heat-cfnapi-66b978bc64-9578t\" (UID: \"344a181f-8505-44bd-a107-481eb1e382da\") " pod="openstack/heat-cfnapi-66b978bc64-9578t" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.830319 4769 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-grtd6\" (UniqueName: \"kubernetes.io/projected/a8c0623e-d0b4-4495-a44c-07a1331dcc0f-kube-api-access-grtd6\") pod \"dnsmasq-dns-f6bc4c6c9-tq8xm\" (UID: \"a8c0623e-d0b4-4495-a44c-07a1331dcc0f\") " pod="openstack/dnsmasq-dns-f6bc4c6c9-tq8xm" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.844035 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-66b978bc64-9578t" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.878832 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/994253aa-3f23-4bce-839a-9193a22d976e-config-data-custom\") pod \"heat-api-86c99fcffb-tfxvs\" (UID: \"994253aa-3f23-4bce-839a-9193a22d976e\") " pod="openstack/heat-api-86c99fcffb-tfxvs" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.878892 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l9bb6\" (UniqueName: \"kubernetes.io/projected/994253aa-3f23-4bce-839a-9193a22d976e-kube-api-access-l9bb6\") pod \"heat-api-86c99fcffb-tfxvs\" (UID: \"994253aa-3f23-4bce-839a-9193a22d976e\") " pod="openstack/heat-api-86c99fcffb-tfxvs" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.878914 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/994253aa-3f23-4bce-839a-9193a22d976e-config-data\") pod \"heat-api-86c99fcffb-tfxvs\" (UID: \"994253aa-3f23-4bce-839a-9193a22d976e\") " pod="openstack/heat-api-86c99fcffb-tfxvs" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.878971 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/994253aa-3f23-4bce-839a-9193a22d976e-combined-ca-bundle\") pod \"heat-api-86c99fcffb-tfxvs\" (UID: \"994253aa-3f23-4bce-839a-9193a22d976e\") " pod="openstack/heat-api-86c99fcffb-tfxvs" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.893496 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/994253aa-3f23-4bce-839a-9193a22d976e-config-data\") pod \"heat-api-86c99fcffb-tfxvs\" (UID: \"994253aa-3f23-4bce-839a-9193a22d976e\") " pod="openstack/heat-api-86c99fcffb-tfxvs" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.904568 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/994253aa-3f23-4bce-839a-9193a22d976e-config-data-custom\") pod \"heat-api-86c99fcffb-tfxvs\" (UID: \"994253aa-3f23-4bce-839a-9193a22d976e\") " pod="openstack/heat-api-86c99fcffb-tfxvs" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.914371 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/994253aa-3f23-4bce-839a-9193a22d976e-combined-ca-bundle\") pod \"heat-api-86c99fcffb-tfxvs\" (UID: \"994253aa-3f23-4bce-839a-9193a22d976e\") " pod="openstack/heat-api-86c99fcffb-tfxvs" Nov 25 10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.932861 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l9bb6\" (UniqueName: \"kubernetes.io/projected/994253aa-3f23-4bce-839a-9193a22d976e-kube-api-access-l9bb6\") pod \"heat-api-86c99fcffb-tfxvs\" (UID: \"994253aa-3f23-4bce-839a-9193a22d976e\") " pod="openstack/heat-api-86c99fcffb-tfxvs" Nov 25 
10:07:29 crc kubenswrapper[4769]: I1125 10:07:29.973594 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f6bc4c6c9-tq8xm" Nov 25 10:07:30 crc kubenswrapper[4769]: I1125 10:07:30.059973 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-86c99fcffb-tfxvs" Nov 25 10:07:30 crc kubenswrapper[4769]: I1125 10:07:30.626494 4769 generic.go:334] "Generic (PLEG): container finished" podID="f19d47b9-3be8-4525-adcf-5cf8ec05ed68" containerID="fd2e4284ab582054817765d1ec02bf285c80eee5ee5af43cc2bf06916fa6b7a5" exitCode=0 Nov 25 10:07:30 crc kubenswrapper[4769]: I1125 10:07:30.626566 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f19d47b9-3be8-4525-adcf-5cf8ec05ed68","Type":"ContainerDied","Data":"fd2e4284ab582054817765d1ec02bf285c80eee5ee5af43cc2bf06916fa6b7a5"} Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.014410 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.096757 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-7687d6c66-wfxhv"] Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.130407 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f19d47b9-3be8-4525-adcf-5cf8ec05ed68-config-data\") pod \"f19d47b9-3be8-4525-adcf-5cf8ec05ed68\" (UID: \"f19d47b9-3be8-4525-adcf-5cf8ec05ed68\") " Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.130593 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f19d47b9-3be8-4525-adcf-5cf8ec05ed68-combined-ca-bundle\") pod \"f19d47b9-3be8-4525-adcf-5cf8ec05ed68\" (UID: \"f19d47b9-3be8-4525-adcf-5cf8ec05ed68\") " Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.130737 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f19d47b9-3be8-4525-adcf-5cf8ec05ed68-sg-core-conf-yaml\") pod \"f19d47b9-3be8-4525-adcf-5cf8ec05ed68\" (UID: \"f19d47b9-3be8-4525-adcf-5cf8ec05ed68\") " Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.131597 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f19d47b9-3be8-4525-adcf-5cf8ec05ed68-scripts\") pod \"f19d47b9-3be8-4525-adcf-5cf8ec05ed68\" (UID: \"f19d47b9-3be8-4525-adcf-5cf8ec05ed68\") " Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.131688 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f19d47b9-3be8-4525-adcf-5cf8ec05ed68-run-httpd\") pod \"f19d47b9-3be8-4525-adcf-5cf8ec05ed68\" (UID: \"f19d47b9-3be8-4525-adcf-5cf8ec05ed68\") " Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.131791 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f19d47b9-3be8-4525-adcf-5cf8ec05ed68-log-httpd\") pod \"f19d47b9-3be8-4525-adcf-5cf8ec05ed68\" (UID: \"f19d47b9-3be8-4525-adcf-5cf8ec05ed68\") " Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.131899 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fjbln\" (UniqueName: 
\"kubernetes.io/projected/f19d47b9-3be8-4525-adcf-5cf8ec05ed68-kube-api-access-fjbln\") pod \"f19d47b9-3be8-4525-adcf-5cf8ec05ed68\" (UID: \"f19d47b9-3be8-4525-adcf-5cf8ec05ed68\") " Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.140350 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f19d47b9-3be8-4525-adcf-5cf8ec05ed68-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "f19d47b9-3be8-4525-adcf-5cf8ec05ed68" (UID: "f19d47b9-3be8-4525-adcf-5cf8ec05ed68"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.141237 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f19d47b9-3be8-4525-adcf-5cf8ec05ed68-scripts" (OuterVolumeSpecName: "scripts") pod "f19d47b9-3be8-4525-adcf-5cf8ec05ed68" (UID: "f19d47b9-3be8-4525-adcf-5cf8ec05ed68"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.144042 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f19d47b9-3be8-4525-adcf-5cf8ec05ed68-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "f19d47b9-3be8-4525-adcf-5cf8ec05ed68" (UID: "f19d47b9-3be8-4525-adcf-5cf8ec05ed68"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.152313 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f19d47b9-3be8-4525-adcf-5cf8ec05ed68-kube-api-access-fjbln" (OuterVolumeSpecName: "kube-api-access-fjbln") pod "f19d47b9-3be8-4525-adcf-5cf8ec05ed68" (UID: "f19d47b9-3be8-4525-adcf-5cf8ec05ed68"). InnerVolumeSpecName "kube-api-access-fjbln". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.260404 4769 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f19d47b9-3be8-4525-adcf-5cf8ec05ed68-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.260527 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fjbln\" (UniqueName: \"kubernetes.io/projected/f19d47b9-3be8-4525-adcf-5cf8ec05ed68-kube-api-access-fjbln\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.260829 4769 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f19d47b9-3be8-4525-adcf-5cf8ec05ed68-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.262699 4769 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f19d47b9-3be8-4525-adcf-5cf8ec05ed68-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.295319 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f19d47b9-3be8-4525-adcf-5cf8ec05ed68-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "f19d47b9-3be8-4525-adcf-5cf8ec05ed68" (UID: "f19d47b9-3be8-4525-adcf-5cf8ec05ed68"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.366052 4769 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f19d47b9-3be8-4525-adcf-5cf8ec05ed68-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.437332 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f19d47b9-3be8-4525-adcf-5cf8ec05ed68-config-data" (OuterVolumeSpecName: "config-data") pod "f19d47b9-3be8-4525-adcf-5cf8ec05ed68" (UID: "f19d47b9-3be8-4525-adcf-5cf8ec05ed68"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.470864 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f19d47b9-3be8-4525-adcf-5cf8ec05ed68-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.477285 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f19d47b9-3be8-4525-adcf-5cf8ec05ed68-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f19d47b9-3be8-4525-adcf-5cf8ec05ed68" (UID: "f19d47b9-3be8-4525-adcf-5cf8ec05ed68"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.549000 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-66b978bc64-9578t"] Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.561002 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-tq8xm"] Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.575752 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f19d47b9-3be8-4525-adcf-5cf8ec05ed68-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.646332 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-86c99fcffb-tfxvs"] Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.678064 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-7687d6c66-wfxhv" event={"ID":"4a8a1143-f843-4007-bdc4-095a5047ca69","Type":"ContainerStarted","Data":"43b0c1f50714851eab120856f7380219c1d5f23176617c933003637600370eb3"} Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.678120 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-7687d6c66-wfxhv" event={"ID":"4a8a1143-f843-4007-bdc4-095a5047ca69","Type":"ContainerStarted","Data":"e01285c7c6b85091b06b425f38de0007437ed6a2dd02b8bd3e8f296ae64d560a"} Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.679753 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-7687d6c66-wfxhv" Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.701447 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f19d47b9-3be8-4525-adcf-5cf8ec05ed68","Type":"ContainerDied","Data":"bbff13873604a5e7978cb31b3b7fb5e81ee11f4cf05e6fec5891656f7b7a60ca"} Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.701535 4769 scope.go:117] "RemoveContainer" containerID="be21b5b052fce5850e61dce4e3173a5984c07e09a17692588190bcc6276bf793" Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 
10:07:31.701926 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.726239 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-86c99fcffb-tfxvs" event={"ID":"994253aa-3f23-4bce-839a-9193a22d976e","Type":"ContainerStarted","Data":"96f0f32e6565c3a4b51aaf1ff65d30fa2933d284208e2f947a60bb54285bd065"} Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.736575 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6bc4c6c9-tq8xm" event={"ID":"a8c0623e-d0b4-4495-a44c-07a1331dcc0f","Type":"ContainerStarted","Data":"cb288b65ac7f6d13208ce78626170cf95fef4336ac70a8301459f9410404d72e"} Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.764087 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-7687d6c66-wfxhv" podStartSLOduration=2.764064273 podStartE2EDuration="2.764064273s" podCreationTimestamp="2025-11-25 10:07:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:07:31.714303407 +0000 UTC m=+1400.299275720" watchObservedRunningTime="2025-11-25 10:07:31.764064273 +0000 UTC m=+1400.349036586" Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.799037 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-66b978bc64-9578t" event={"ID":"344a181f-8505-44bd-a107-481eb1e382da","Type":"ContainerStarted","Data":"a71e321b8268f6050d8d8118ac14f92fe4a075c3ca06fd561cfb4b58c80fa708"} Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.830383 4769 scope.go:117] "RemoveContainer" containerID="84c73fc52ebe48575f4c128ea8e61b39efa3c4d7a2a578209f5a53dfd82a7aab" Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.841505 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.860437 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.878143 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:07:31 crc kubenswrapper[4769]: E1125 10:07:31.880719 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f19d47b9-3be8-4525-adcf-5cf8ec05ed68" containerName="sg-core" Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.880753 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="f19d47b9-3be8-4525-adcf-5cf8ec05ed68" containerName="sg-core" Nov 25 10:07:31 crc kubenswrapper[4769]: E1125 10:07:31.880788 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f19d47b9-3be8-4525-adcf-5cf8ec05ed68" containerName="proxy-httpd" Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.880800 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="f19d47b9-3be8-4525-adcf-5cf8ec05ed68" containerName="proxy-httpd" Nov 25 10:07:31 crc kubenswrapper[4769]: E1125 10:07:31.880828 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f19d47b9-3be8-4525-adcf-5cf8ec05ed68" containerName="ceilometer-notification-agent" Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.880841 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="f19d47b9-3be8-4525-adcf-5cf8ec05ed68" containerName="ceilometer-notification-agent" Nov 25 10:07:31 crc kubenswrapper[4769]: E1125 10:07:31.880915 4769 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="f19d47b9-3be8-4525-adcf-5cf8ec05ed68" containerName="ceilometer-central-agent" Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.880928 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="f19d47b9-3be8-4525-adcf-5cf8ec05ed68" containerName="ceilometer-central-agent" Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.881419 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="f19d47b9-3be8-4525-adcf-5cf8ec05ed68" containerName="sg-core" Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.881463 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="f19d47b9-3be8-4525-adcf-5cf8ec05ed68" containerName="proxy-httpd" Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.881478 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="f19d47b9-3be8-4525-adcf-5cf8ec05ed68" containerName="ceilometer-notification-agent" Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.881508 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="f19d47b9-3be8-4525-adcf-5cf8ec05ed68" containerName="ceilometer-central-agent" Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.886202 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.894295 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.895694 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 10:07:31 crc kubenswrapper[4769]: I1125 10:07:31.916609 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 10:07:32 crc kubenswrapper[4769]: I1125 10:07:32.001661 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/390e02db-45a5-4498-92a6-edde620d04b8-log-httpd\") pod \"ceilometer-0\" (UID: \"390e02db-45a5-4498-92a6-edde620d04b8\") " pod="openstack/ceilometer-0" Nov 25 10:07:32 crc kubenswrapper[4769]: I1125 10:07:32.001757 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/390e02db-45a5-4498-92a6-edde620d04b8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"390e02db-45a5-4498-92a6-edde620d04b8\") " pod="openstack/ceilometer-0" Nov 25 10:07:32 crc kubenswrapper[4769]: I1125 10:07:32.001786 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/390e02db-45a5-4498-92a6-edde620d04b8-scripts\") pod \"ceilometer-0\" (UID: \"390e02db-45a5-4498-92a6-edde620d04b8\") " pod="openstack/ceilometer-0" Nov 25 10:07:32 crc kubenswrapper[4769]: I1125 10:07:32.001813 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/390e02db-45a5-4498-92a6-edde620d04b8-config-data\") pod \"ceilometer-0\" (UID: \"390e02db-45a5-4498-92a6-edde620d04b8\") " pod="openstack/ceilometer-0" Nov 25 10:07:32 crc kubenswrapper[4769]: I1125 10:07:32.001842 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/390e02db-45a5-4498-92a6-edde620d04b8-sg-core-conf-yaml\") pod 
\"ceilometer-0\" (UID: \"390e02db-45a5-4498-92a6-edde620d04b8\") " pod="openstack/ceilometer-0" Nov 25 10:07:32 crc kubenswrapper[4769]: I1125 10:07:32.001896 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/390e02db-45a5-4498-92a6-edde620d04b8-run-httpd\") pod \"ceilometer-0\" (UID: \"390e02db-45a5-4498-92a6-edde620d04b8\") " pod="openstack/ceilometer-0" Nov 25 10:07:32 crc kubenswrapper[4769]: I1125 10:07:32.001993 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b5ntv\" (UniqueName: \"kubernetes.io/projected/390e02db-45a5-4498-92a6-edde620d04b8-kube-api-access-b5ntv\") pod \"ceilometer-0\" (UID: \"390e02db-45a5-4498-92a6-edde620d04b8\") " pod="openstack/ceilometer-0" Nov 25 10:07:32 crc kubenswrapper[4769]: I1125 10:07:32.031358 4769 scope.go:117] "RemoveContainer" containerID="fd2e4284ab582054817765d1ec02bf285c80eee5ee5af43cc2bf06916fa6b7a5" Nov 25 10:07:32 crc kubenswrapper[4769]: I1125 10:07:32.080101 4769 scope.go:117] "RemoveContainer" containerID="03b0818632efff47884190f830bae5e6400f5396ba923330493b774a2d0a72e4" Nov 25 10:07:32 crc kubenswrapper[4769]: I1125 10:07:32.106292 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/390e02db-45a5-4498-92a6-edde620d04b8-log-httpd\") pod \"ceilometer-0\" (UID: \"390e02db-45a5-4498-92a6-edde620d04b8\") " pod="openstack/ceilometer-0" Nov 25 10:07:32 crc kubenswrapper[4769]: I1125 10:07:32.106401 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/390e02db-45a5-4498-92a6-edde620d04b8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"390e02db-45a5-4498-92a6-edde620d04b8\") " pod="openstack/ceilometer-0" Nov 25 10:07:32 crc kubenswrapper[4769]: I1125 10:07:32.106440 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/390e02db-45a5-4498-92a6-edde620d04b8-scripts\") pod \"ceilometer-0\" (UID: \"390e02db-45a5-4498-92a6-edde620d04b8\") " pod="openstack/ceilometer-0" Nov 25 10:07:32 crc kubenswrapper[4769]: I1125 10:07:32.106495 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/390e02db-45a5-4498-92a6-edde620d04b8-config-data\") pod \"ceilometer-0\" (UID: \"390e02db-45a5-4498-92a6-edde620d04b8\") " pod="openstack/ceilometer-0" Nov 25 10:07:32 crc kubenswrapper[4769]: I1125 10:07:32.106534 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/390e02db-45a5-4498-92a6-edde620d04b8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"390e02db-45a5-4498-92a6-edde620d04b8\") " pod="openstack/ceilometer-0" Nov 25 10:07:32 crc kubenswrapper[4769]: I1125 10:07:32.106614 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/390e02db-45a5-4498-92a6-edde620d04b8-run-httpd\") pod \"ceilometer-0\" (UID: \"390e02db-45a5-4498-92a6-edde620d04b8\") " pod="openstack/ceilometer-0" Nov 25 10:07:32 crc kubenswrapper[4769]: I1125 10:07:32.106678 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b5ntv\" (UniqueName: 
\"kubernetes.io/projected/390e02db-45a5-4498-92a6-edde620d04b8-kube-api-access-b5ntv\") pod \"ceilometer-0\" (UID: \"390e02db-45a5-4498-92a6-edde620d04b8\") " pod="openstack/ceilometer-0" Nov 25 10:07:32 crc kubenswrapper[4769]: I1125 10:07:32.111031 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/390e02db-45a5-4498-92a6-edde620d04b8-log-httpd\") pod \"ceilometer-0\" (UID: \"390e02db-45a5-4498-92a6-edde620d04b8\") " pod="openstack/ceilometer-0" Nov 25 10:07:32 crc kubenswrapper[4769]: I1125 10:07:32.112439 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/390e02db-45a5-4498-92a6-edde620d04b8-run-httpd\") pod \"ceilometer-0\" (UID: \"390e02db-45a5-4498-92a6-edde620d04b8\") " pod="openstack/ceilometer-0" Nov 25 10:07:32 crc kubenswrapper[4769]: I1125 10:07:32.118180 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/390e02db-45a5-4498-92a6-edde620d04b8-config-data\") pod \"ceilometer-0\" (UID: \"390e02db-45a5-4498-92a6-edde620d04b8\") " pod="openstack/ceilometer-0" Nov 25 10:07:32 crc kubenswrapper[4769]: I1125 10:07:32.137790 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/390e02db-45a5-4498-92a6-edde620d04b8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"390e02db-45a5-4498-92a6-edde620d04b8\") " pod="openstack/ceilometer-0" Nov 25 10:07:32 crc kubenswrapper[4769]: I1125 10:07:32.138235 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/390e02db-45a5-4498-92a6-edde620d04b8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"390e02db-45a5-4498-92a6-edde620d04b8\") " pod="openstack/ceilometer-0" Nov 25 10:07:32 crc kubenswrapper[4769]: I1125 10:07:32.138644 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/390e02db-45a5-4498-92a6-edde620d04b8-scripts\") pod \"ceilometer-0\" (UID: \"390e02db-45a5-4498-92a6-edde620d04b8\") " pod="openstack/ceilometer-0" Nov 25 10:07:32 crc kubenswrapper[4769]: I1125 10:07:32.145087 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b5ntv\" (UniqueName: \"kubernetes.io/projected/390e02db-45a5-4498-92a6-edde620d04b8-kube-api-access-b5ntv\") pod \"ceilometer-0\" (UID: \"390e02db-45a5-4498-92a6-edde620d04b8\") " pod="openstack/ceilometer-0" Nov 25 10:07:32 crc kubenswrapper[4769]: I1125 10:07:32.267037 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f19d47b9-3be8-4525-adcf-5cf8ec05ed68" path="/var/lib/kubelet/pods/f19d47b9-3be8-4525-adcf-5cf8ec05ed68/volumes" Nov 25 10:07:32 crc kubenswrapper[4769]: I1125 10:07:32.348781 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:07:32 crc kubenswrapper[4769]: I1125 10:07:32.824923 4769 generic.go:334] "Generic (PLEG): container finished" podID="a8c0623e-d0b4-4495-a44c-07a1331dcc0f" containerID="4142dfd1c74973e1732d6bfdfe2fcd10753e7a027e8a7cc8f80dce4f2cd0cbfd" exitCode=0 Nov 25 10:07:32 crc kubenswrapper[4769]: I1125 10:07:32.826645 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6bc4c6c9-tq8xm" event={"ID":"a8c0623e-d0b4-4495-a44c-07a1331dcc0f","Type":"ContainerDied","Data":"4142dfd1c74973e1732d6bfdfe2fcd10753e7a027e8a7cc8f80dce4f2cd0cbfd"} Nov 25 10:07:33 crc kubenswrapper[4769]: I1125 10:07:33.025338 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:07:33 crc kubenswrapper[4769]: W1125 10:07:33.040815 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod390e02db_45a5_4498_92a6_edde620d04b8.slice/crio-a162ac9d6f394e7837aff3301a7ba95d6e552b434ff0e3d7f4372cc5e43460f3 WatchSource:0}: Error finding container a162ac9d6f394e7837aff3301a7ba95d6e552b434ff0e3d7f4372cc5e43460f3: Status 404 returned error can't find the container with id a162ac9d6f394e7837aff3301a7ba95d6e552b434ff0e3d7f4372cc5e43460f3 Nov 25 10:07:33 crc kubenswrapper[4769]: I1125 10:07:33.856885 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"390e02db-45a5-4498-92a6-edde620d04b8","Type":"ContainerStarted","Data":"a162ac9d6f394e7837aff3301a7ba95d6e552b434ff0e3d7f4372cc5e43460f3"} Nov 25 10:07:33 crc kubenswrapper[4769]: I1125 10:07:33.862735 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6bc4c6c9-tq8xm" event={"ID":"a8c0623e-d0b4-4495-a44c-07a1331dcc0f","Type":"ContainerStarted","Data":"54e4e54498971d5c9fb360958369f829c428201971d966116c3768f99ce31358"} Nov 25 10:07:33 crc kubenswrapper[4769]: I1125 10:07:33.862814 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-f6bc4c6c9-tq8xm" Nov 25 10:07:33 crc kubenswrapper[4769]: I1125 10:07:33.892789 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-f6bc4c6c9-tq8xm" podStartSLOduration=4.89277004 podStartE2EDuration="4.89277004s" podCreationTimestamp="2025-11-25 10:07:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:07:33.888070745 +0000 UTC m=+1402.473043068" watchObservedRunningTime="2025-11-25 10:07:33.89277004 +0000 UTC m=+1402.477742353" Nov 25 10:07:35 crc kubenswrapper[4769]: I1125 10:07:35.770675 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-657bc78fc7-ccwr8" Nov 25 10:07:35 crc kubenswrapper[4769]: I1125 10:07:35.771458 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-657bc78fc7-ccwr8" Nov 25 10:07:35 crc kubenswrapper[4769]: I1125 10:07:35.909784 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"390e02db-45a5-4498-92a6-edde620d04b8","Type":"ContainerStarted","Data":"5dcfedb7884471be39bca6018fa56d4485b12af1417d0c22a393117af309dbd2"} Nov 25 10:07:35 crc kubenswrapper[4769]: I1125 10:07:35.921873 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-86c99fcffb-tfxvs" 
event={"ID":"994253aa-3f23-4bce-839a-9193a22d976e","Type":"ContainerStarted","Data":"35177ce8beaa44ed1615ea137ad50b003688350d023d046b8c6bf95e65b95264"} Nov 25 10:07:35 crc kubenswrapper[4769]: I1125 10:07:35.923036 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-86c99fcffb-tfxvs" Nov 25 10:07:35 crc kubenswrapper[4769]: I1125 10:07:35.931476 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-66b978bc64-9578t" event={"ID":"344a181f-8505-44bd-a107-481eb1e382da","Type":"ContainerStarted","Data":"fe8db9b6bad2ed958730351d2ead96e4d5954d2392ae06cf9cc0244d66f2b31c"} Nov 25 10:07:35 crc kubenswrapper[4769]: I1125 10:07:35.955850 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-86c99fcffb-tfxvs" podStartSLOduration=3.243183588 podStartE2EDuration="6.955827258s" podCreationTimestamp="2025-11-25 10:07:29 +0000 UTC" firstStartedPulling="2025-11-25 10:07:31.615690479 +0000 UTC m=+1400.200662792" lastFinishedPulling="2025-11-25 10:07:35.328334149 +0000 UTC m=+1403.913306462" observedRunningTime="2025-11-25 10:07:35.946553401 +0000 UTC m=+1404.531525714" watchObservedRunningTime="2025-11-25 10:07:35.955827258 +0000 UTC m=+1404.540799571" Nov 25 10:07:35 crc kubenswrapper[4769]: I1125 10:07:35.995001 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-66b978bc64-9578t" podStartSLOduration=3.273507326 podStartE2EDuration="6.994976321s" podCreationTimestamp="2025-11-25 10:07:29 +0000 UTC" firstStartedPulling="2025-11-25 10:07:31.603213157 +0000 UTC m=+1400.188185470" lastFinishedPulling="2025-11-25 10:07:35.324682152 +0000 UTC m=+1403.909654465" observedRunningTime="2025-11-25 10:07:35.975474861 +0000 UTC m=+1404.560447174" watchObservedRunningTime="2025-11-25 10:07:35.994976321 +0000 UTC m=+1404.579948634" Nov 25 10:07:36 crc kubenswrapper[4769]: I1125 10:07:36.953473 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"390e02db-45a5-4498-92a6-edde620d04b8","Type":"ContainerStarted","Data":"5402e38ca05b40ed35df466acc448dd61cc735d8b3d25685b691923950460033"} Nov 25 10:07:36 crc kubenswrapper[4769]: I1125 10:07:36.954681 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-66b978bc64-9578t" Nov 25 10:07:37 crc kubenswrapper[4769]: I1125 10:07:37.968070 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"390e02db-45a5-4498-92a6-edde620d04b8","Type":"ContainerStarted","Data":"fd59667432f8fa5df9a1393340b3850752dce7da60861f78df9d677ccf87a328"} Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.019047 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-779556746f-vswpj"] Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.021720 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-779556746f-vswpj" Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.035857 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-779556746f-vswpj"] Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.055975 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-59d8894fc7-94vz2"] Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.057837 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-59d8894fc7-94vz2" Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.073507 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-59d8894fc7-94vz2"] Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.097582 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-78c5b49c8c-v7f8v"] Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.103039 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-78c5b49c8c-v7f8v" Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.147362 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-78c5b49c8c-v7f8v"] Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.208179 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xdqxf\" (UniqueName: \"kubernetes.io/projected/16fdfb26-f9e8-40be-88fd-341faa115238-kube-api-access-xdqxf\") pod \"heat-cfnapi-78c5b49c8c-v7f8v\" (UID: \"16fdfb26-f9e8-40be-88fd-341faa115238\") " pod="openstack/heat-cfnapi-78c5b49c8c-v7f8v" Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.208337 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1-config-data\") pod \"heat-engine-779556746f-vswpj\" (UID: \"a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1\") " pod="openstack/heat-engine-779556746f-vswpj" Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.208536 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1-config-data-custom\") pod \"heat-engine-779556746f-vswpj\" (UID: \"a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1\") " pod="openstack/heat-engine-779556746f-vswpj" Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.208603 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/96b52ba9-e2dc-4a75-8001-b2ce075634e7-config-data-custom\") pod \"heat-api-59d8894fc7-94vz2\" (UID: \"96b52ba9-e2dc-4a75-8001-b2ce075634e7\") " pod="openstack/heat-api-59d8894fc7-94vz2" Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.208699 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96b52ba9-e2dc-4a75-8001-b2ce075634e7-combined-ca-bundle\") pod \"heat-api-59d8894fc7-94vz2\" (UID: \"96b52ba9-e2dc-4a75-8001-b2ce075634e7\") " pod="openstack/heat-api-59d8894fc7-94vz2" Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.208813 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/96b52ba9-e2dc-4a75-8001-b2ce075634e7-config-data\") pod \"heat-api-59d8894fc7-94vz2\" (UID: \"96b52ba9-e2dc-4a75-8001-b2ce075634e7\") " pod="openstack/heat-api-59d8894fc7-94vz2" Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.208914 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16fdfb26-f9e8-40be-88fd-341faa115238-combined-ca-bundle\") pod \"heat-cfnapi-78c5b49c8c-v7f8v\" (UID: \"16fdfb26-f9e8-40be-88fd-341faa115238\") " 
pod="openstack/heat-cfnapi-78c5b49c8c-v7f8v" Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.209125 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1-combined-ca-bundle\") pod \"heat-engine-779556746f-vswpj\" (UID: \"a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1\") " pod="openstack/heat-engine-779556746f-vswpj" Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.209176 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q2pgf\" (UniqueName: \"kubernetes.io/projected/a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1-kube-api-access-q2pgf\") pod \"heat-engine-779556746f-vswpj\" (UID: \"a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1\") " pod="openstack/heat-engine-779556746f-vswpj" Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.209219 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5hspm\" (UniqueName: \"kubernetes.io/projected/96b52ba9-e2dc-4a75-8001-b2ce075634e7-kube-api-access-5hspm\") pod \"heat-api-59d8894fc7-94vz2\" (UID: \"96b52ba9-e2dc-4a75-8001-b2ce075634e7\") " pod="openstack/heat-api-59d8894fc7-94vz2" Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.209247 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16fdfb26-f9e8-40be-88fd-341faa115238-config-data\") pod \"heat-cfnapi-78c5b49c8c-v7f8v\" (UID: \"16fdfb26-f9e8-40be-88fd-341faa115238\") " pod="openstack/heat-cfnapi-78c5b49c8c-v7f8v" Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.209289 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/16fdfb26-f9e8-40be-88fd-341faa115238-config-data-custom\") pod \"heat-cfnapi-78c5b49c8c-v7f8v\" (UID: \"16fdfb26-f9e8-40be-88fd-341faa115238\") " pod="openstack/heat-cfnapi-78c5b49c8c-v7f8v" Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.312047 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/16fdfb26-f9e8-40be-88fd-341faa115238-config-data-custom\") pod \"heat-cfnapi-78c5b49c8c-v7f8v\" (UID: \"16fdfb26-f9e8-40be-88fd-341faa115238\") " pod="openstack/heat-cfnapi-78c5b49c8c-v7f8v" Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.312133 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xdqxf\" (UniqueName: \"kubernetes.io/projected/16fdfb26-f9e8-40be-88fd-341faa115238-kube-api-access-xdqxf\") pod \"heat-cfnapi-78c5b49c8c-v7f8v\" (UID: \"16fdfb26-f9e8-40be-88fd-341faa115238\") " pod="openstack/heat-cfnapi-78c5b49c8c-v7f8v" Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.312240 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1-config-data\") pod \"heat-engine-779556746f-vswpj\" (UID: \"a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1\") " pod="openstack/heat-engine-779556746f-vswpj" Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.312273 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1-config-data-custom\") pod 
\"heat-engine-779556746f-vswpj\" (UID: \"a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1\") " pod="openstack/heat-engine-779556746f-vswpj" Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.312295 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/96b52ba9-e2dc-4a75-8001-b2ce075634e7-config-data-custom\") pod \"heat-api-59d8894fc7-94vz2\" (UID: \"96b52ba9-e2dc-4a75-8001-b2ce075634e7\") " pod="openstack/heat-api-59d8894fc7-94vz2" Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.312329 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96b52ba9-e2dc-4a75-8001-b2ce075634e7-combined-ca-bundle\") pod \"heat-api-59d8894fc7-94vz2\" (UID: \"96b52ba9-e2dc-4a75-8001-b2ce075634e7\") " pod="openstack/heat-api-59d8894fc7-94vz2" Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.312367 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/96b52ba9-e2dc-4a75-8001-b2ce075634e7-config-data\") pod \"heat-api-59d8894fc7-94vz2\" (UID: \"96b52ba9-e2dc-4a75-8001-b2ce075634e7\") " pod="openstack/heat-api-59d8894fc7-94vz2" Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.312399 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16fdfb26-f9e8-40be-88fd-341faa115238-combined-ca-bundle\") pod \"heat-cfnapi-78c5b49c8c-v7f8v\" (UID: \"16fdfb26-f9e8-40be-88fd-341faa115238\") " pod="openstack/heat-cfnapi-78c5b49c8c-v7f8v" Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.312448 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1-combined-ca-bundle\") pod \"heat-engine-779556746f-vswpj\" (UID: \"a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1\") " pod="openstack/heat-engine-779556746f-vswpj" Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.312503 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q2pgf\" (UniqueName: \"kubernetes.io/projected/a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1-kube-api-access-q2pgf\") pod \"heat-engine-779556746f-vswpj\" (UID: \"a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1\") " pod="openstack/heat-engine-779556746f-vswpj" Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.312538 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5hspm\" (UniqueName: \"kubernetes.io/projected/96b52ba9-e2dc-4a75-8001-b2ce075634e7-kube-api-access-5hspm\") pod \"heat-api-59d8894fc7-94vz2\" (UID: \"96b52ba9-e2dc-4a75-8001-b2ce075634e7\") " pod="openstack/heat-api-59d8894fc7-94vz2" Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.312568 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16fdfb26-f9e8-40be-88fd-341faa115238-config-data\") pod \"heat-cfnapi-78c5b49c8c-v7f8v\" (UID: \"16fdfb26-f9e8-40be-88fd-341faa115238\") " pod="openstack/heat-cfnapi-78c5b49c8c-v7f8v" Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.328627 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/96b52ba9-e2dc-4a75-8001-b2ce075634e7-config-data\") pod \"heat-api-59d8894fc7-94vz2\" (UID: 
\"96b52ba9-e2dc-4a75-8001-b2ce075634e7\") " pod="openstack/heat-api-59d8894fc7-94vz2" Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.329829 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16fdfb26-f9e8-40be-88fd-341faa115238-config-data\") pod \"heat-cfnapi-78c5b49c8c-v7f8v\" (UID: \"16fdfb26-f9e8-40be-88fd-341faa115238\") " pod="openstack/heat-cfnapi-78c5b49c8c-v7f8v" Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.334218 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/16fdfb26-f9e8-40be-88fd-341faa115238-config-data-custom\") pod \"heat-cfnapi-78c5b49c8c-v7f8v\" (UID: \"16fdfb26-f9e8-40be-88fd-341faa115238\") " pod="openstack/heat-cfnapi-78c5b49c8c-v7f8v" Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.334720 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1-config-data\") pod \"heat-engine-779556746f-vswpj\" (UID: \"a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1\") " pod="openstack/heat-engine-779556746f-vswpj" Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.338804 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1-config-data-custom\") pod \"heat-engine-779556746f-vswpj\" (UID: \"a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1\") " pod="openstack/heat-engine-779556746f-vswpj" Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.354265 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16fdfb26-f9e8-40be-88fd-341faa115238-combined-ca-bundle\") pod \"heat-cfnapi-78c5b49c8c-v7f8v\" (UID: \"16fdfb26-f9e8-40be-88fd-341faa115238\") " pod="openstack/heat-cfnapi-78c5b49c8c-v7f8v" Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.355814 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1-combined-ca-bundle\") pod \"heat-engine-779556746f-vswpj\" (UID: \"a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1\") " pod="openstack/heat-engine-779556746f-vswpj" Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.357755 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96b52ba9-e2dc-4a75-8001-b2ce075634e7-combined-ca-bundle\") pod \"heat-api-59d8894fc7-94vz2\" (UID: \"96b52ba9-e2dc-4a75-8001-b2ce075634e7\") " pod="openstack/heat-api-59d8894fc7-94vz2" Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.370447 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5hspm\" (UniqueName: \"kubernetes.io/projected/96b52ba9-e2dc-4a75-8001-b2ce075634e7-kube-api-access-5hspm\") pod \"heat-api-59d8894fc7-94vz2\" (UID: \"96b52ba9-e2dc-4a75-8001-b2ce075634e7\") " pod="openstack/heat-api-59d8894fc7-94vz2" Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.371046 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/96b52ba9-e2dc-4a75-8001-b2ce075634e7-config-data-custom\") pod \"heat-api-59d8894fc7-94vz2\" (UID: \"96b52ba9-e2dc-4a75-8001-b2ce075634e7\") " pod="openstack/heat-api-59d8894fc7-94vz2" Nov 25 10:07:39 crc 
kubenswrapper[4769]: I1125 10:07:39.377203 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xdqxf\" (UniqueName: \"kubernetes.io/projected/16fdfb26-f9e8-40be-88fd-341faa115238-kube-api-access-xdqxf\") pod \"heat-cfnapi-78c5b49c8c-v7f8v\" (UID: \"16fdfb26-f9e8-40be-88fd-341faa115238\") " pod="openstack/heat-cfnapi-78c5b49c8c-v7f8v" Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.380870 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-59d8894fc7-94vz2" Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.408273 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q2pgf\" (UniqueName: \"kubernetes.io/projected/a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1-kube-api-access-q2pgf\") pod \"heat-engine-779556746f-vswpj\" (UID: \"a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1\") " pod="openstack/heat-engine-779556746f-vswpj" Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.442874 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-78c5b49c8c-v7f8v" Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.660111 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-779556746f-vswpj" Nov 25 10:07:39 crc kubenswrapper[4769]: I1125 10:07:39.975234 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-f6bc4c6c9-tq8xm" Nov 25 10:07:40 crc kubenswrapper[4769]: I1125 10:07:40.068690 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-7zb5q"] Nov 25 10:07:40 crc kubenswrapper[4769]: I1125 10:07:40.073637 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5784cf869f-7zb5q" podUID="d127e733-e066-46ef-994b-62244f203f34" containerName="dnsmasq-dns" containerID="cri-o://9faac2f35eb8dd5f4919645703894574f6367d783e430dda2b8f706433016f18" gracePeriod=10 Nov 25 10:07:41 crc kubenswrapper[4769]: I1125 10:07:41.014832 4769 generic.go:334] "Generic (PLEG): container finished" podID="d127e733-e066-46ef-994b-62244f203f34" containerID="9faac2f35eb8dd5f4919645703894574f6367d783e430dda2b8f706433016f18" exitCode=0 Nov 25 10:07:41 crc kubenswrapper[4769]: I1125 10:07:41.015285 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5784cf869f-7zb5q" event={"ID":"d127e733-e066-46ef-994b-62244f203f34","Type":"ContainerDied","Data":"9faac2f35eb8dd5f4919645703894574f6367d783e430dda2b8f706433016f18"} Nov 25 10:07:41 crc kubenswrapper[4769]: I1125 10:07:41.882396 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.059728 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-66b978bc64-9578t"] Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.060221 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-cfnapi-66b978bc64-9578t" podUID="344a181f-8505-44bd-a107-481eb1e382da" containerName="heat-cfnapi" containerID="cri-o://fe8db9b6bad2ed958730351d2ead96e4d5954d2392ae06cf9cc0244d66f2b31c" gracePeriod=60 Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.088486 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-86c99fcffb-tfxvs"] Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.088748 4769 kuberuntime_container.go:808] "Killing container with a grace 
period" pod="openstack/heat-api-86c99fcffb-tfxvs" podUID="994253aa-3f23-4bce-839a-9193a22d976e" containerName="heat-api" containerID="cri-o://35177ce8beaa44ed1615ea137ad50b003688350d023d046b8c6bf95e65b95264" gracePeriod=60 Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.097772 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-cfnapi-66b978bc64-9578t" podUID="344a181f-8505-44bd-a107-481eb1e382da" containerName="heat-cfnapi" probeResult="failure" output="Get \"http://10.217.0.211:8000/healthcheck\": EOF" Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.112381 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-546cbc7f88-ncbrw"] Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.114149 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-546cbc7f88-ncbrw" Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.125600 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-internal-svc" Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.125842 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-public-svc" Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.153473 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-58b7d79fd9-47jn4"] Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.155610 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-58b7d79fd9-47jn4" Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.160879 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-public-svc" Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.161423 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-internal-svc" Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.174341 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-api-86c99fcffb-tfxvs" podUID="994253aa-3f23-4bce-839a-9193a22d976e" containerName="heat-api" probeResult="failure" output="Get \"http://10.217.0.213:8004/healthcheck\": EOF" Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.175083 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/heat-api-86c99fcffb-tfxvs" podUID="994253aa-3f23-4bce-839a-9193a22d976e" containerName="heat-api" probeResult="failure" output="Get \"http://10.217.0.213:8004/healthcheck\": EOF" Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.184891 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8bd1d11-b593-4c27-a79e-d49792d851ee-config-data\") pod \"heat-api-58b7d79fd9-47jn4\" (UID: \"c8bd1d11-b593-4c27-a79e-d49792d851ee\") " pod="openstack/heat-api-58b7d79fd9-47jn4" Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.184935 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8bd1d11-b593-4c27-a79e-d49792d851ee-combined-ca-bundle\") pod \"heat-api-58b7d79fd9-47jn4\" (UID: \"c8bd1d11-b593-4c27-a79e-d49792d851ee\") " pod="openstack/heat-api-58b7d79fd9-47jn4" Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.185063 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j5f66\" (UniqueName: 
\"kubernetes.io/projected/f5031f85-50f1-4ffe-b4c4-8ef90a3084bb-kube-api-access-j5f66\") pod \"heat-cfnapi-546cbc7f88-ncbrw\" (UID: \"f5031f85-50f1-4ffe-b4c4-8ef90a3084bb\") " pod="openstack/heat-cfnapi-546cbc7f88-ncbrw" Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.185137 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c8bd1d11-b593-4c27-a79e-d49792d851ee-config-data-custom\") pod \"heat-api-58b7d79fd9-47jn4\" (UID: \"c8bd1d11-b593-4c27-a79e-d49792d851ee\") " pod="openstack/heat-api-58b7d79fd9-47jn4" Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.185167 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5031f85-50f1-4ffe-b4c4-8ef90a3084bb-public-tls-certs\") pod \"heat-cfnapi-546cbc7f88-ncbrw\" (UID: \"f5031f85-50f1-4ffe-b4c4-8ef90a3084bb\") " pod="openstack/heat-cfnapi-546cbc7f88-ncbrw" Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.185200 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5031f85-50f1-4ffe-b4c4-8ef90a3084bb-config-data\") pod \"heat-cfnapi-546cbc7f88-ncbrw\" (UID: \"f5031f85-50f1-4ffe-b4c4-8ef90a3084bb\") " pod="openstack/heat-cfnapi-546cbc7f88-ncbrw" Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.185328 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5031f85-50f1-4ffe-b4c4-8ef90a3084bb-internal-tls-certs\") pod \"heat-cfnapi-546cbc7f88-ncbrw\" (UID: \"f5031f85-50f1-4ffe-b4c4-8ef90a3084bb\") " pod="openstack/heat-cfnapi-546cbc7f88-ncbrw" Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.185421 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c8bd1d11-b593-4c27-a79e-d49792d851ee-public-tls-certs\") pod \"heat-api-58b7d79fd9-47jn4\" (UID: \"c8bd1d11-b593-4c27-a79e-d49792d851ee\") " pod="openstack/heat-api-58b7d79fd9-47jn4" Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.185756 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5031f85-50f1-4ffe-b4c4-8ef90a3084bb-combined-ca-bundle\") pod \"heat-cfnapi-546cbc7f88-ncbrw\" (UID: \"f5031f85-50f1-4ffe-b4c4-8ef90a3084bb\") " pod="openstack/heat-cfnapi-546cbc7f88-ncbrw" Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.185866 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t7qff\" (UniqueName: \"kubernetes.io/projected/c8bd1d11-b593-4c27-a79e-d49792d851ee-kube-api-access-t7qff\") pod \"heat-api-58b7d79fd9-47jn4\" (UID: \"c8bd1d11-b593-4c27-a79e-d49792d851ee\") " pod="openstack/heat-api-58b7d79fd9-47jn4" Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.185919 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c8bd1d11-b593-4c27-a79e-d49792d851ee-internal-tls-certs\") pod \"heat-api-58b7d79fd9-47jn4\" (UID: \"c8bd1d11-b593-4c27-a79e-d49792d851ee\") " pod="openstack/heat-api-58b7d79fd9-47jn4" Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.185944 4769 
Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.195029 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-546cbc7f88-ncbrw"]
Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.287083 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-58b7d79fd9-47jn4"]
Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.290189 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c8bd1d11-b593-4c27-a79e-d49792d851ee-internal-tls-certs\") pod \"heat-api-58b7d79fd9-47jn4\" (UID: \"c8bd1d11-b593-4c27-a79e-d49792d851ee\") " pod="openstack/heat-api-58b7d79fd9-47jn4"
Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.290268 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f5031f85-50f1-4ffe-b4c4-8ef90a3084bb-config-data-custom\") pod \"heat-cfnapi-546cbc7f88-ncbrw\" (UID: \"f5031f85-50f1-4ffe-b4c4-8ef90a3084bb\") " pod="openstack/heat-cfnapi-546cbc7f88-ncbrw"
Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.290344 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8bd1d11-b593-4c27-a79e-d49792d851ee-config-data\") pod \"heat-api-58b7d79fd9-47jn4\" (UID: \"c8bd1d11-b593-4c27-a79e-d49792d851ee\") " pod="openstack/heat-api-58b7d79fd9-47jn4"
Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.290378 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8bd1d11-b593-4c27-a79e-d49792d851ee-combined-ca-bundle\") pod \"heat-api-58b7d79fd9-47jn4\" (UID: \"c8bd1d11-b593-4c27-a79e-d49792d851ee\") " pod="openstack/heat-api-58b7d79fd9-47jn4"
Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.293017 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j5f66\" (UniqueName: \"kubernetes.io/projected/f5031f85-50f1-4ffe-b4c4-8ef90a3084bb-kube-api-access-j5f66\") pod \"heat-cfnapi-546cbc7f88-ncbrw\" (UID: \"f5031f85-50f1-4ffe-b4c4-8ef90a3084bb\") " pod="openstack/heat-cfnapi-546cbc7f88-ncbrw"
Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.293073 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c8bd1d11-b593-4c27-a79e-d49792d851ee-config-data-custom\") pod \"heat-api-58b7d79fd9-47jn4\" (UID: \"c8bd1d11-b593-4c27-a79e-d49792d851ee\") " pod="openstack/heat-api-58b7d79fd9-47jn4"
Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.293444 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5031f85-50f1-4ffe-b4c4-8ef90a3084bb-public-tls-certs\") pod \"heat-cfnapi-546cbc7f88-ncbrw\" (UID: \"f5031f85-50f1-4ffe-b4c4-8ef90a3084bb\") " pod="openstack/heat-cfnapi-546cbc7f88-ncbrw"
Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.293515 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5031f85-50f1-4ffe-b4c4-8ef90a3084bb-config-data\") pod \"heat-cfnapi-546cbc7f88-ncbrw\" (UID: \"f5031f85-50f1-4ffe-b4c4-8ef90a3084bb\") " pod="openstack/heat-cfnapi-546cbc7f88-ncbrw"
Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.293853 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5031f85-50f1-4ffe-b4c4-8ef90a3084bb-internal-tls-certs\") pod \"heat-cfnapi-546cbc7f88-ncbrw\" (UID: \"f5031f85-50f1-4ffe-b4c4-8ef90a3084bb\") " pod="openstack/heat-cfnapi-546cbc7f88-ncbrw"
Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.293925 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c8bd1d11-b593-4c27-a79e-d49792d851ee-public-tls-certs\") pod \"heat-api-58b7d79fd9-47jn4\" (UID: \"c8bd1d11-b593-4c27-a79e-d49792d851ee\") " pod="openstack/heat-api-58b7d79fd9-47jn4"
Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.294181 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5031f85-50f1-4ffe-b4c4-8ef90a3084bb-combined-ca-bundle\") pod \"heat-cfnapi-546cbc7f88-ncbrw\" (UID: \"f5031f85-50f1-4ffe-b4c4-8ef90a3084bb\") " pod="openstack/heat-cfnapi-546cbc7f88-ncbrw"
Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.294271 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t7qff\" (UniqueName: \"kubernetes.io/projected/c8bd1d11-b593-4c27-a79e-d49792d851ee-kube-api-access-t7qff\") pod \"heat-api-58b7d79fd9-47jn4\" (UID: \"c8bd1d11-b593-4c27-a79e-d49792d851ee\") " pod="openstack/heat-api-58b7d79fd9-47jn4"
Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.305762 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5031f85-50f1-4ffe-b4c4-8ef90a3084bb-public-tls-certs\") pod \"heat-cfnapi-546cbc7f88-ncbrw\" (UID: \"f5031f85-50f1-4ffe-b4c4-8ef90a3084bb\") " pod="openstack/heat-cfnapi-546cbc7f88-ncbrw"
Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.307448 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8bd1d11-b593-4c27-a79e-d49792d851ee-config-data\") pod \"heat-api-58b7d79fd9-47jn4\" (UID: \"c8bd1d11-b593-4c27-a79e-d49792d851ee\") " pod="openstack/heat-api-58b7d79fd9-47jn4"
Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.312848 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f5031f85-50f1-4ffe-b4c4-8ef90a3084bb-config-data-custom\") pod \"heat-cfnapi-546cbc7f88-ncbrw\" (UID: \"f5031f85-50f1-4ffe-b4c4-8ef90a3084bb\") " pod="openstack/heat-cfnapi-546cbc7f88-ncbrw"
Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.328218 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c8bd1d11-b593-4c27-a79e-d49792d851ee-public-tls-certs\") pod \"heat-api-58b7d79fd9-47jn4\" (UID: \"c8bd1d11-b593-4c27-a79e-d49792d851ee\") " pod="openstack/heat-api-58b7d79fd9-47jn4"
Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.329362 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c8bd1d11-b593-4c27-a79e-d49792d851ee-internal-tls-certs\") pod \"heat-api-58b7d79fd9-47jn4\" (UID: \"c8bd1d11-b593-4c27-a79e-d49792d851ee\") " pod="openstack/heat-api-58b7d79fd9-47jn4"
pod \"heat-api-58b7d79fd9-47jn4\" (UID: \"c8bd1d11-b593-4c27-a79e-d49792d851ee\") " pod="openstack/heat-api-58b7d79fd9-47jn4" Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.330498 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c8bd1d11-b593-4c27-a79e-d49792d851ee-config-data-custom\") pod \"heat-api-58b7d79fd9-47jn4\" (UID: \"c8bd1d11-b593-4c27-a79e-d49792d851ee\") " pod="openstack/heat-api-58b7d79fd9-47jn4" Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.330500 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5031f85-50f1-4ffe-b4c4-8ef90a3084bb-config-data\") pod \"heat-cfnapi-546cbc7f88-ncbrw\" (UID: \"f5031f85-50f1-4ffe-b4c4-8ef90a3084bb\") " pod="openstack/heat-cfnapi-546cbc7f88-ncbrw" Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.333020 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5031f85-50f1-4ffe-b4c4-8ef90a3084bb-combined-ca-bundle\") pod \"heat-cfnapi-546cbc7f88-ncbrw\" (UID: \"f5031f85-50f1-4ffe-b4c4-8ef90a3084bb\") " pod="openstack/heat-cfnapi-546cbc7f88-ncbrw" Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.342215 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j5f66\" (UniqueName: \"kubernetes.io/projected/f5031f85-50f1-4ffe-b4c4-8ef90a3084bb-kube-api-access-j5f66\") pod \"heat-cfnapi-546cbc7f88-ncbrw\" (UID: \"f5031f85-50f1-4ffe-b4c4-8ef90a3084bb\") " pod="openstack/heat-cfnapi-546cbc7f88-ncbrw" Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.343180 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8bd1d11-b593-4c27-a79e-d49792d851ee-combined-ca-bundle\") pod \"heat-api-58b7d79fd9-47jn4\" (UID: \"c8bd1d11-b593-4c27-a79e-d49792d851ee\") " pod="openstack/heat-api-58b7d79fd9-47jn4" Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.344051 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5031f85-50f1-4ffe-b4c4-8ef90a3084bb-internal-tls-certs\") pod \"heat-cfnapi-546cbc7f88-ncbrw\" (UID: \"f5031f85-50f1-4ffe-b4c4-8ef90a3084bb\") " pod="openstack/heat-cfnapi-546cbc7f88-ncbrw" Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.354746 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t7qff\" (UniqueName: \"kubernetes.io/projected/c8bd1d11-b593-4c27-a79e-d49792d851ee-kube-api-access-t7qff\") pod \"heat-api-58b7d79fd9-47jn4\" (UID: \"c8bd1d11-b593-4c27-a79e-d49792d851ee\") " pod="openstack/heat-api-58b7d79fd9-47jn4" Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.457292 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-546cbc7f88-ncbrw" Nov 25 10:07:43 crc kubenswrapper[4769]: I1125 10:07:43.526572 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-58b7d79fd9-47jn4" Nov 25 10:07:44 crc kubenswrapper[4769]: I1125 10:07:44.547112 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5784cf869f-7zb5q" podUID="d127e733-e066-46ef-994b-62244f203f34" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.203:5353: connect: connection refused" Nov 25 10:07:47 crc kubenswrapper[4769]: I1125 10:07:47.589234 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-api-86c99fcffb-tfxvs" podUID="994253aa-3f23-4bce-839a-9193a22d976e" containerName="heat-api" probeResult="failure" output="Get \"http://10.217.0.213:8004/healthcheck\": read tcp 10.217.0.2:36874->10.217.0.213:8004: read: connection reset by peer" Nov 25 10:07:47 crc kubenswrapper[4769]: I1125 10:07:47.590484 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-api-86c99fcffb-tfxvs" podUID="994253aa-3f23-4bce-839a-9193a22d976e" containerName="heat-api" probeResult="failure" output="Get \"http://10.217.0.213:8004/healthcheck\": dial tcp 10.217.0.213:8004: connect: connection refused" Nov 25 10:07:47 crc kubenswrapper[4769]: E1125 10:07:47.905629 4769 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified" Nov 25 10:07:47 crc kubenswrapper[4769]: E1125 10:07:47.906424 4769 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:openstackclient,Image:quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified,Command:[/bin/sleep],Args:[infinity],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n87h97h55dh5c4h54hdh5d5h5b5h668hdch84h5bfh67ch5fbh666h69h6dh595h75h5f6h579h5f7hbch5fchbfh575h5f4h5fch6ch558hddh94q,ValueFrom:nil,},EnvVar{Name:OS_CLOUD,Value:default,ValueFrom:nil,},EnvVar{Name:PROMETHEUS_CA_CERT,Value:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,ValueFrom:nil,},EnvVar{Name:PROMETHEUS_HOST,Value:metric-storage-prometheus.openstack.svc,ValueFrom:nil,},EnvVar{Name:PROMETHEUS_PORT,Value:9090,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:openstack-config,ReadOnly:false,MountPath:/home/cloud-admin/.config/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/home/cloud-admin/.config/openstack/secure.yaml,SubPath:secure.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/home/cloud-admin/cloudrc,SubPath:cloudrc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-nplwk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL 
Nov 25 10:07:47 crc kubenswrapper[4769]: E1125 10:07:47.907772 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstackclient\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstackclient" podUID="be068e15-9a8b-472c-9a66-8ee06cf2491f"
Nov 25 10:07:48 crc kubenswrapper[4769]: I1125 10:07:48.155419 4769 generic.go:334] "Generic (PLEG): container finished" podID="994253aa-3f23-4bce-839a-9193a22d976e" containerID="35177ce8beaa44ed1615ea137ad50b003688350d023d046b8c6bf95e65b95264" exitCode=0
Nov 25 10:07:48 crc kubenswrapper[4769]: I1125 10:07:48.155909 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-86c99fcffb-tfxvs" event={"ID":"994253aa-3f23-4bce-839a-9193a22d976e","Type":"ContainerDied","Data":"35177ce8beaa44ed1615ea137ad50b003688350d023d046b8c6bf95e65b95264"}
Nov 25 10:07:48 crc kubenswrapper[4769]: E1125 10:07:48.169406 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstackclient\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified\\\"\"" pod="openstack/openstackclient" podUID="be068e15-9a8b-472c-9a66-8ee06cf2491f"
Nov 25 10:07:48 crc kubenswrapper[4769]: I1125 10:07:48.566295 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-cfnapi-66b978bc64-9578t" podUID="344a181f-8505-44bd-a107-481eb1e382da" containerName="heat-cfnapi" probeResult="failure" output="Get \"http://10.217.0.211:8000/healthcheck\": read tcp 10.217.0.2:44932->10.217.0.211:8000: read: connection reset by peer"
Nov 25 10:07:48 crc kubenswrapper[4769]: E1125 10:07:48.592897 4769 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod344a181f_8505_44bd_a107_481eb1e382da.slice/crio-fe8db9b6bad2ed958730351d2ead96e4d5954d2392ae06cf9cc0244d66f2b31c.scope\": RecentStats: unable to find data in memory cache]"
Nov 25 10:07:49 crc kubenswrapper[4769]: I1125 10:07:49.170629 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5784cf869f-7zb5q" event={"ID":"d127e733-e066-46ef-994b-62244f203f34","Type":"ContainerDied","Data":"b7fe08b4bcd54ee98353f4aad1dd0969f7e8a277ccc15292615215294315cc13"}
Nov 25 10:07:49 crc kubenswrapper[4769]: I1125 10:07:49.171183 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b7fe08b4bcd54ee98353f4aad1dd0969f7e8a277ccc15292615215294315cc13"
Nov 25 10:07:49 crc kubenswrapper[4769]: I1125 10:07:49.172725 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-86c99fcffb-tfxvs" event={"ID":"994253aa-3f23-4bce-839a-9193a22d976e","Type":"ContainerDied","Data":"96f0f32e6565c3a4b51aaf1ff65d30fa2933d284208e2f947a60bb54285bd065"}
Nov 25 10:07:49 crc kubenswrapper[4769]: I1125 10:07:49.172747 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="96f0f32e6565c3a4b51aaf1ff65d30fa2933d284208e2f947a60bb54285bd065"
Nov 25 10:07:49 crc kubenswrapper[4769]: I1125 10:07:49.174861 4769 generic.go:334] "Generic (PLEG): container finished" podID="344a181f-8505-44bd-a107-481eb1e382da" containerID="fe8db9b6bad2ed958730351d2ead96e4d5954d2392ae06cf9cc0244d66f2b31c" exitCode=0
Nov 25 10:07:49 crc kubenswrapper[4769]: I1125 10:07:49.174886 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-66b978bc64-9578t" event={"ID":"344a181f-8505-44bd-a107-481eb1e382da","Type":"ContainerDied","Data":"fe8db9b6bad2ed958730351d2ead96e4d5954d2392ae06cf9cc0244d66f2b31c"}
Nov 25 10:07:49 crc kubenswrapper[4769]: I1125 10:07:49.238835 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-86c99fcffb-tfxvs"
Nov 25 10:07:49 crc kubenswrapper[4769]: I1125 10:07:49.296920 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5784cf869f-7zb5q"
Nov 25 10:07:49 crc kubenswrapper[4769]: I1125 10:07:49.371922 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/994253aa-3f23-4bce-839a-9193a22d976e-config-data-custom\") pod \"994253aa-3f23-4bce-839a-9193a22d976e\" (UID: \"994253aa-3f23-4bce-839a-9193a22d976e\") "
Nov 25 10:07:49 crc kubenswrapper[4769]: I1125 10:07:49.372102 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/994253aa-3f23-4bce-839a-9193a22d976e-combined-ca-bundle\") pod \"994253aa-3f23-4bce-839a-9193a22d976e\" (UID: \"994253aa-3f23-4bce-839a-9193a22d976e\") "
Nov 25 10:07:49 crc kubenswrapper[4769]: I1125 10:07:49.372357 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l9bb6\" (UniqueName: \"kubernetes.io/projected/994253aa-3f23-4bce-839a-9193a22d976e-kube-api-access-l9bb6\") pod \"994253aa-3f23-4bce-839a-9193a22d976e\" (UID: \"994253aa-3f23-4bce-839a-9193a22d976e\") "
Nov 25 10:07:49 crc kubenswrapper[4769]: I1125 10:07:49.372405 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/994253aa-3f23-4bce-839a-9193a22d976e-config-data\") pod \"994253aa-3f23-4bce-839a-9193a22d976e\" (UID: \"994253aa-3f23-4bce-839a-9193a22d976e\") "
Nov 25 10:07:49 crc kubenswrapper[4769]: I1125 10:07:49.380054 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/994253aa-3f23-4bce-839a-9193a22d976e-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "994253aa-3f23-4bce-839a-9193a22d976e" (UID: "994253aa-3f23-4bce-839a-9193a22d976e"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:07:49 crc kubenswrapper[4769]: I1125 10:07:49.392284 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/994253aa-3f23-4bce-839a-9193a22d976e-kube-api-access-l9bb6" (OuterVolumeSpecName: "kube-api-access-l9bb6") pod "994253aa-3f23-4bce-839a-9193a22d976e" (UID: "994253aa-3f23-4bce-839a-9193a22d976e"). InnerVolumeSpecName "kube-api-access-l9bb6". PluginName "kubernetes.io/projected", VolumeGidValue ""
InnerVolumeSpecName "kube-api-access-l9bb6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:07:49 crc kubenswrapper[4769]: I1125 10:07:49.477532 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d127e733-e066-46ef-994b-62244f203f34-ovsdbserver-nb\") pod \"d127e733-e066-46ef-994b-62244f203f34\" (UID: \"d127e733-e066-46ef-994b-62244f203f34\") " Nov 25 10:07:49 crc kubenswrapper[4769]: I1125 10:07:49.477593 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nch6d\" (UniqueName: \"kubernetes.io/projected/d127e733-e066-46ef-994b-62244f203f34-kube-api-access-nch6d\") pod \"d127e733-e066-46ef-994b-62244f203f34\" (UID: \"d127e733-e066-46ef-994b-62244f203f34\") " Nov 25 10:07:49 crc kubenswrapper[4769]: I1125 10:07:49.477900 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d127e733-e066-46ef-994b-62244f203f34-dns-svc\") pod \"d127e733-e066-46ef-994b-62244f203f34\" (UID: \"d127e733-e066-46ef-994b-62244f203f34\") " Nov 25 10:07:49 crc kubenswrapper[4769]: I1125 10:07:49.477986 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d127e733-e066-46ef-994b-62244f203f34-dns-swift-storage-0\") pod \"d127e733-e066-46ef-994b-62244f203f34\" (UID: \"d127e733-e066-46ef-994b-62244f203f34\") " Nov 25 10:07:49 crc kubenswrapper[4769]: I1125 10:07:49.478061 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d127e733-e066-46ef-994b-62244f203f34-ovsdbserver-sb\") pod \"d127e733-e066-46ef-994b-62244f203f34\" (UID: \"d127e733-e066-46ef-994b-62244f203f34\") " Nov 25 10:07:49 crc kubenswrapper[4769]: I1125 10:07:49.478100 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d127e733-e066-46ef-994b-62244f203f34-config\") pod \"d127e733-e066-46ef-994b-62244f203f34\" (UID: \"d127e733-e066-46ef-994b-62244f203f34\") " Nov 25 10:07:49 crc kubenswrapper[4769]: I1125 10:07:49.482079 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l9bb6\" (UniqueName: \"kubernetes.io/projected/994253aa-3f23-4bce-839a-9193a22d976e-kube-api-access-l9bb6\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:49 crc kubenswrapper[4769]: I1125 10:07:49.482119 4769 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/994253aa-3f23-4bce-839a-9193a22d976e-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:49 crc kubenswrapper[4769]: I1125 10:07:49.608385 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d127e733-e066-46ef-994b-62244f203f34-kube-api-access-nch6d" (OuterVolumeSpecName: "kube-api-access-nch6d") pod "d127e733-e066-46ef-994b-62244f203f34" (UID: "d127e733-e066-46ef-994b-62244f203f34"). InnerVolumeSpecName "kube-api-access-nch6d". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:07:49 crc kubenswrapper[4769]: I1125 10:07:49.634722 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-546cbc7f88-ncbrw"] Nov 25 10:07:49 crc kubenswrapper[4769]: I1125 10:07:49.649851 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-58b7d79fd9-47jn4"] Nov 25 10:07:49 crc kubenswrapper[4769]: I1125 10:07:49.660889 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-59d8894fc7-94vz2"] Nov 25 10:07:49 crc kubenswrapper[4769]: I1125 10:07:49.707010 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nch6d\" (UniqueName: \"kubernetes.io/projected/d127e733-e066-46ef-994b-62244f203f34-kube-api-access-nch6d\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:49 crc kubenswrapper[4769]: W1125 10:07:49.714861 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf5031f85_50f1_4ffe_b4c4_8ef90a3084bb.slice/crio-af1e91f2c453e5bf782442bbc99a6d59c43e54878bd360311330cc1e9c7a50f6 WatchSource:0}: Error finding container af1e91f2c453e5bf782442bbc99a6d59c43e54878bd360311330cc1e9c7a50f6: Status 404 returned error can't find the container with id af1e91f2c453e5bf782442bbc99a6d59c43e54878bd360311330cc1e9c7a50f6 Nov 25 10:07:49 crc kubenswrapper[4769]: I1125 10:07:49.798338 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/994253aa-3f23-4bce-839a-9193a22d976e-config-data" (OuterVolumeSpecName: "config-data") pod "994253aa-3f23-4bce-839a-9193a22d976e" (UID: "994253aa-3f23-4bce-839a-9193a22d976e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:49 crc kubenswrapper[4769]: I1125 10:07:49.811496 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/994253aa-3f23-4bce-839a-9193a22d976e-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:49 crc kubenswrapper[4769]: I1125 10:07:49.839892 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-78c5b49c8c-v7f8v"] Nov 25 10:07:49 crc kubenswrapper[4769]: I1125 10:07:49.880446 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-779556746f-vswpj"] Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.086404 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d127e733-e066-46ef-994b-62244f203f34-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "d127e733-e066-46ef-994b-62244f203f34" (UID: "d127e733-e066-46ef-994b-62244f203f34"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.116569 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-7687d6c66-wfxhv" Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.124243 4769 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d127e733-e066-46ef-994b-62244f203f34-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.129326 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/994253aa-3f23-4bce-839a-9193a22d976e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "994253aa-3f23-4bce-839a-9193a22d976e" (UID: "994253aa-3f23-4bce-839a-9193a22d976e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.218085 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d127e733-e066-46ef-994b-62244f203f34-config" (OuterVolumeSpecName: "config") pod "d127e733-e066-46ef-994b-62244f203f34" (UID: "d127e733-e066-46ef-994b-62244f203f34"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.228453 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d127e733-e066-46ef-994b-62244f203f34-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.228489 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/994253aa-3f23-4bce-839a-9193a22d976e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.229207 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d127e733-e066-46ef-994b-62244f203f34-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "d127e733-e066-46ef-994b-62244f203f34" (UID: "d127e733-e066-46ef-994b-62244f203f34"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.251981 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d127e733-e066-46ef-994b-62244f203f34-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d127e733-e066-46ef-994b-62244f203f34" (UID: "d127e733-e066-46ef-994b-62244f203f34"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.290241 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="390e02db-45a5-4498-92a6-edde620d04b8" containerName="ceilometer-central-agent" containerID="cri-o://5dcfedb7884471be39bca6018fa56d4485b12af1417d0c22a393117af309dbd2" gracePeriod=30 Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.290898 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="390e02db-45a5-4498-92a6-edde620d04b8" containerName="proxy-httpd" containerID="cri-o://a4970afad4f8fa54c587e3ee3430eb3d1a6998c7f1b8b189880458509f4b76d6" gracePeriod=30 Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.290999 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="390e02db-45a5-4498-92a6-edde620d04b8" containerName="sg-core" containerID="cri-o://fd59667432f8fa5df9a1393340b3850752dce7da60861f78df9d677ccf87a328" gracePeriod=30 Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.291075 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="390e02db-45a5-4498-92a6-edde620d04b8" containerName="ceilometer-notification-agent" containerID="cri-o://5402e38ca05b40ed35df466acc448dd61cc735d8b3d25685b691923950460033" gracePeriod=30 Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.295285 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d127e733-e066-46ef-994b-62244f203f34-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "d127e733-e066-46ef-994b-62244f203f34" (UID: "d127e733-e066-46ef-994b-62244f203f34"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.300794 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5784cf869f-7zb5q" Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.300819 4769 util.go:48] "No ready sandbox for pod can be found. 
Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.333947 4769 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d127e733-e066-46ef-994b-62244f203f34-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.333998 4769 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d127e733-e066-46ef-994b-62244f203f34-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.334010 4769 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d127e733-e066-46ef-994b-62244f203f34-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.338433 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.392003462 podStartE2EDuration="19.338410406s" podCreationTimestamp="2025-11-25 10:07:31 +0000 UTC" firstStartedPulling="2025-11-25 10:07:33.056310194 +0000 UTC m=+1401.641282507" lastFinishedPulling="2025-11-25 10:07:49.002717138 +0000 UTC m=+1417.587689451" observedRunningTime="2025-11-25 10:07:50.326501588 +0000 UTC m=+1418.911473901" watchObservedRunningTime="2025-11-25 10:07:50.338410406 +0000 UTC m=+1418.923382719"
Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.435131 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.435182 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-546cbc7f88-ncbrw" event={"ID":"f5031f85-50f1-4ffe-b4c4-8ef90a3084bb","Type":"ContainerStarted","Data":"af1e91f2c453e5bf782442bbc99a6d59c43e54878bd360311330cc1e9c7a50f6"}
Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.435209 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-78c5b49c8c-v7f8v" event={"ID":"16fdfb26-f9e8-40be-88fd-341faa115238","Type":"ContainerStarted","Data":"fe69187cb730e12878de9557c7ad5443b01695435c8a8d1359d65be35cb1b2e9"}
Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.435222 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-779556746f-vswpj" event={"ID":"a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1","Type":"ContainerStarted","Data":"77bc38c50c041efab48a21c7251e400129b3d34a897027caeb3470acfe292b6f"}
Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.435236 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-58b7d79fd9-47jn4" event={"ID":"c8bd1d11-b593-4c27-a79e-d49792d851ee","Type":"ContainerStarted","Data":"8aa1b67da5761c0e354214ffc51afdebe4e113989a68947a8f63b0e803e51f01"}
Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.435251 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-66b978bc64-9578t" event={"ID":"344a181f-8505-44bd-a107-481eb1e382da","Type":"ContainerDied","Data":"a71e321b8268f6050d8d8118ac14f92fe4a075c3ca06fd561cfb4b58c80fa708"}
Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.435266 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a71e321b8268f6050d8d8118ac14f92fe4a075c3ca06fd561cfb4b58c80fa708"
Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.435282 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.435293 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"390e02db-45a5-4498-92a6-edde620d04b8","Type":"ContainerStarted","Data":"a4970afad4f8fa54c587e3ee3430eb3d1a6998c7f1b8b189880458509f4b76d6"}
Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.435303 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-59d8894fc7-94vz2" event={"ID":"96b52ba9-e2dc-4a75-8001-b2ce075634e7","Type":"ContainerStarted","Data":"83f31ab85fa4812ad43f7eabb8ee3e1df7905e7cb6bdfb0dd903d2fc94534a4d"}
Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.435535 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="4ea324d5-69e1-4f0b-982f-15b0de3ec539" containerName="glance-log" containerID="cri-o://4b84b1aa69963dad2fd6727776c50ccd8e6eb53f4ef4e9a309bc9059cadc7147" gracePeriod=30
Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.442843 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="4ea324d5-69e1-4f0b-982f-15b0de3ec539" containerName="glance-httpd" containerID="cri-o://b29fbee263efb754baf1bfbecd2cbb37e73412708dcac6dfb72529016552877d" gracePeriod=30
Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.516791 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-66b978bc64-9578t"
Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.625422 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-7zb5q"]
Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.649211 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nnncd\" (UniqueName: \"kubernetes.io/projected/344a181f-8505-44bd-a107-481eb1e382da-kube-api-access-nnncd\") pod \"344a181f-8505-44bd-a107-481eb1e382da\" (UID: \"344a181f-8505-44bd-a107-481eb1e382da\") "
Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.649413 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/344a181f-8505-44bd-a107-481eb1e382da-combined-ca-bundle\") pod \"344a181f-8505-44bd-a107-481eb1e382da\" (UID: \"344a181f-8505-44bd-a107-481eb1e382da\") "
Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.649534 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/344a181f-8505-44bd-a107-481eb1e382da-config-data-custom\") pod \"344a181f-8505-44bd-a107-481eb1e382da\" (UID: \"344a181f-8505-44bd-a107-481eb1e382da\") "
Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.649656 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/344a181f-8505-44bd-a107-481eb1e382da-config-data\") pod \"344a181f-8505-44bd-a107-481eb1e382da\" (UID: \"344a181f-8505-44bd-a107-481eb1e382da\") "
Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.660731 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-7zb5q"]
Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.668521 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/344a181f-8505-44bd-a107-481eb1e382da-kube-api-access-nnncd" (OuterVolumeSpecName: "kube-api-access-nnncd") pod "344a181f-8505-44bd-a107-481eb1e382da" (UID: "344a181f-8505-44bd-a107-481eb1e382da"). InnerVolumeSpecName "kube-api-access-nnncd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.686336 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/344a181f-8505-44bd-a107-481eb1e382da-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "344a181f-8505-44bd-a107-481eb1e382da" (UID: "344a181f-8505-44bd-a107-481eb1e382da"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.756349 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nnncd\" (UniqueName: \"kubernetes.io/projected/344a181f-8505-44bd-a107-481eb1e382da-kube-api-access-nnncd\") on node \"crc\" DevicePath \"\""
Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.756395 4769 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/344a181f-8505-44bd-a107-481eb1e382da-config-data-custom\") on node \"crc\" DevicePath \"\""
Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.853414 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/344a181f-8505-44bd-a107-481eb1e382da-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "344a181f-8505-44bd-a107-481eb1e382da" (UID: "344a181f-8505-44bd-a107-481eb1e382da"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.860074 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/344a181f-8505-44bd-a107-481eb1e382da-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.875442 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/344a181f-8505-44bd-a107-481eb1e382da-config-data" (OuterVolumeSpecName: "config-data") pod "344a181f-8505-44bd-a107-481eb1e382da" (UID: "344a181f-8505-44bd-a107-481eb1e382da"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:07:50 crc kubenswrapper[4769]: I1125 10:07:50.962170 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/344a181f-8505-44bd-a107-481eb1e382da-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.315719 4769 generic.go:334] "Generic (PLEG): container finished" podID="4ea324d5-69e1-4f0b-982f-15b0de3ec539" containerID="4b84b1aa69963dad2fd6727776c50ccd8e6eb53f4ef4e9a309bc9059cadc7147" exitCode=143
Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.315818 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4ea324d5-69e1-4f0b-982f-15b0de3ec539","Type":"ContainerDied","Data":"4b84b1aa69963dad2fd6727776c50ccd8e6eb53f4ef4e9a309bc9059cadc7147"}
Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.320171 4769 generic.go:334] "Generic (PLEG): container finished" podID="390e02db-45a5-4498-92a6-edde620d04b8" containerID="a4970afad4f8fa54c587e3ee3430eb3d1a6998c7f1b8b189880458509f4b76d6" exitCode=0
Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.320200 4769 generic.go:334] "Generic (PLEG): container finished" podID="390e02db-45a5-4498-92a6-edde620d04b8" containerID="fd59667432f8fa5df9a1393340b3850752dce7da60861f78df9d677ccf87a328" exitCode=2
Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.320213 4769 generic.go:334] "Generic (PLEG): container finished" podID="390e02db-45a5-4498-92a6-edde620d04b8" containerID="5dcfedb7884471be39bca6018fa56d4485b12af1417d0c22a393117af309dbd2" exitCode=0
Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.320257 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"390e02db-45a5-4498-92a6-edde620d04b8","Type":"ContainerDied","Data":"a4970afad4f8fa54c587e3ee3430eb3d1a6998c7f1b8b189880458509f4b76d6"}
Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.320287 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"390e02db-45a5-4498-92a6-edde620d04b8","Type":"ContainerDied","Data":"fd59667432f8fa5df9a1393340b3850752dce7da60861f78df9d677ccf87a328"}
Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.320300 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"390e02db-45a5-4498-92a6-edde620d04b8","Type":"ContainerDied","Data":"5dcfedb7884471be39bca6018fa56d4485b12af1417d0c22a393117af309dbd2"}
Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.323099 4769 generic.go:334] "Generic (PLEG): container finished" podID="96b52ba9-e2dc-4a75-8001-b2ce075634e7" containerID="c2eed78b57a0fe8cd9146ae55a904007cc9dec09e780476e1ce0912f3ccf6fce" exitCode=1
Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.323168 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-59d8894fc7-94vz2" event={"ID":"96b52ba9-e2dc-4a75-8001-b2ce075634e7","Type":"ContainerDied","Data":"c2eed78b57a0fe8cd9146ae55a904007cc9dec09e780476e1ce0912f3ccf6fce"}
Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.324979 4769 scope.go:117] "RemoveContainer" containerID="c2eed78b57a0fe8cd9146ae55a904007cc9dec09e780476e1ce0912f3ccf6fce"
Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.325521 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-546cbc7f88-ncbrw" event={"ID":"f5031f85-50f1-4ffe-b4c4-8ef90a3084bb","Type":"ContainerStarted","Data":"3fb0a5458cfab3fc19b103fcdc99c45a4e7c256b6503b12bbec50b55947981c3"}
event={"ID":"f5031f85-50f1-4ffe-b4c4-8ef90a3084bb","Type":"ContainerStarted","Data":"3fb0a5458cfab3fc19b103fcdc99c45a4e7c256b6503b12bbec50b55947981c3"} Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.326033 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-546cbc7f88-ncbrw" Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.330025 4769 generic.go:334] "Generic (PLEG): container finished" podID="16fdfb26-f9e8-40be-88fd-341faa115238" containerID="22baa5f349a41c3f9c3684c415d5a36776a43a78c4636f02fb60679678b5cc13" exitCode=1 Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.330344 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-78c5b49c8c-v7f8v" event={"ID":"16fdfb26-f9e8-40be-88fd-341faa115238","Type":"ContainerDied","Data":"22baa5f349a41c3f9c3684c415d5a36776a43a78c4636f02fb60679678b5cc13"} Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.330553 4769 scope.go:117] "RemoveContainer" containerID="22baa5f349a41c3f9c3684c415d5a36776a43a78c4636f02fb60679678b5cc13" Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.338050 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-779556746f-vswpj" event={"ID":"a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1","Type":"ContainerStarted","Data":"1a40c4ebd0306d941f589b392746c158e1be9e00a1efae92685ee197c68c2d34"} Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.339575 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-779556746f-vswpj" Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.344568 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-66b978bc64-9578t" Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.358081 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-58b7d79fd9-47jn4" event={"ID":"c8bd1d11-b593-4c27-a79e-d49792d851ee","Type":"ContainerStarted","Data":"79c2e0d139b7a0949a3bb799a63b670a13c45910d0dad1425c8c980bf82ef178"} Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.358952 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-58b7d79fd9-47jn4" Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.473925 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-546cbc7f88-ncbrw" podStartSLOduration=8.47389586 podStartE2EDuration="8.47389586s" podCreationTimestamp="2025-11-25 10:07:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:07:51.435710792 +0000 UTC m=+1420.020683105" watchObservedRunningTime="2025-11-25 10:07:51.47389586 +0000 UTC m=+1420.058868163" Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.476599 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-779556746f-vswpj" podStartSLOduration=13.476593152 podStartE2EDuration="13.476593152s" podCreationTimestamp="2025-11-25 10:07:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:07:51.461333235 +0000 UTC m=+1420.046305548" watchObservedRunningTime="2025-11-25 10:07:51.476593152 +0000 UTC m=+1420.061565465" Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.505396 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-xng9l"] 
Nov 25 10:07:51 crc kubenswrapper[4769]: E1125 10:07:51.506041 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="344a181f-8505-44bd-a107-481eb1e382da" containerName="heat-cfnapi"
Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.506060 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="344a181f-8505-44bd-a107-481eb1e382da" containerName="heat-cfnapi"
Nov 25 10:07:51 crc kubenswrapper[4769]: E1125 10:07:51.506075 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="994253aa-3f23-4bce-839a-9193a22d976e" containerName="heat-api"
Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.506081 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="994253aa-3f23-4bce-839a-9193a22d976e" containerName="heat-api"
Nov 25 10:07:51 crc kubenswrapper[4769]: E1125 10:07:51.506100 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d127e733-e066-46ef-994b-62244f203f34" containerName="init"
Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.506108 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="d127e733-e066-46ef-994b-62244f203f34" containerName="init"
Nov 25 10:07:51 crc kubenswrapper[4769]: E1125 10:07:51.506127 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d127e733-e066-46ef-994b-62244f203f34" containerName="dnsmasq-dns"
Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.506134 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="d127e733-e066-46ef-994b-62244f203f34" containerName="dnsmasq-dns"
Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.506366 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="994253aa-3f23-4bce-839a-9193a22d976e" containerName="heat-api"
Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.506388 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="344a181f-8505-44bd-a107-481eb1e382da" containerName="heat-cfnapi"
Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.506400 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="d127e733-e066-46ef-994b-62244f203f34" containerName="dnsmasq-dns"
Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.510082 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xng9l"
Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.526257 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-66b978bc64-9578t"]
Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.542977 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-66b978bc64-9578t"]
Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.562323 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xng9l"]
Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.562519 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-58b7d79fd9-47jn4" podStartSLOduration=8.562507211 podStartE2EDuration="8.562507211s" podCreationTimestamp="2025-11-25 10:07:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:07:51.511392569 +0000 UTC m=+1420.096364882" watchObservedRunningTime="2025-11-25 10:07:51.562507211 +0000 UTC m=+1420.147479524"
Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.663360 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.663713 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="4ea2eb46-81be-409a-9f6b-8775f5458372" containerName="glance-log" containerID="cri-o://5c0bff317d73f056f37cf0fcc331317bdf7c0cfb69ae4a456b42915a028ac8b0" gracePeriod=30
Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.663933 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="4ea2eb46-81be-409a-9f6b-8775f5458372" containerName="glance-httpd" containerID="cri-o://324d6d2418fce58a565ec648cf71a995ff8b4854821799494437c7cf85e2042c" gracePeriod=30
Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.689951 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/922fd841-fddc-4178-ba91-d3b0d45826a8-catalog-content\") pod \"certified-operators-xng9l\" (UID: \"922fd841-fddc-4178-ba91-d3b0d45826a8\") " pod="openshift-marketplace/certified-operators-xng9l"
Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.690558 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qhb86\" (UniqueName: \"kubernetes.io/projected/922fd841-fddc-4178-ba91-d3b0d45826a8-kube-api-access-qhb86\") pod \"certified-operators-xng9l\" (UID: \"922fd841-fddc-4178-ba91-d3b0d45826a8\") " pod="openshift-marketplace/certified-operators-xng9l"
Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.691194 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/922fd841-fddc-4178-ba91-d3b0d45826a8-utilities\") pod \"certified-operators-xng9l\" (UID: \"922fd841-fddc-4178-ba91-d3b0d45826a8\") " pod="openshift-marketplace/certified-operators-xng9l"
Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.797271 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/922fd841-fddc-4178-ba91-d3b0d45826a8-utilities\") pod \"certified-operators-xng9l\" (UID: \"922fd841-fddc-4178-ba91-d3b0d45826a8\") " pod="openshift-marketplace/certified-operators-xng9l"
Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.797388 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/922fd841-fddc-4178-ba91-d3b0d45826a8-catalog-content\") pod \"certified-operators-xng9l\" (UID: \"922fd841-fddc-4178-ba91-d3b0d45826a8\") " pod="openshift-marketplace/certified-operators-xng9l"
Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.797505 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qhb86\" (UniqueName: \"kubernetes.io/projected/922fd841-fddc-4178-ba91-d3b0d45826a8-kube-api-access-qhb86\") pod \"certified-operators-xng9l\" (UID: \"922fd841-fddc-4178-ba91-d3b0d45826a8\") " pod="openshift-marketplace/certified-operators-xng9l"
Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.798436 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/922fd841-fddc-4178-ba91-d3b0d45826a8-utilities\") pod \"certified-operators-xng9l\" (UID: \"922fd841-fddc-4178-ba91-d3b0d45826a8\") " pod="openshift-marketplace/certified-operators-xng9l"
Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.798676 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/922fd841-fddc-4178-ba91-d3b0d45826a8-catalog-content\") pod \"certified-operators-xng9l\" (UID: \"922fd841-fddc-4178-ba91-d3b0d45826a8\") " pod="openshift-marketplace/certified-operators-xng9l"
Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.822213 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qhb86\" (UniqueName: \"kubernetes.io/projected/922fd841-fddc-4178-ba91-d3b0d45826a8-kube-api-access-qhb86\") pod \"certified-operators-xng9l\" (UID: \"922fd841-fddc-4178-ba91-d3b0d45826a8\") " pod="openshift-marketplace/certified-operators-xng9l"
Nov 25 10:07:51 crc kubenswrapper[4769]: I1125 10:07:51.914404 4769 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-marketplace/certified-operators-xng9l" Nov 25 10:07:52 crc kubenswrapper[4769]: I1125 10:07:52.261368 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="344a181f-8505-44bd-a107-481eb1e382da" path="/var/lib/kubelet/pods/344a181f-8505-44bd-a107-481eb1e382da/volumes" Nov 25 10:07:52 crc kubenswrapper[4769]: I1125 10:07:52.263186 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d127e733-e066-46ef-994b-62244f203f34" path="/var/lib/kubelet/pods/d127e733-e066-46ef-994b-62244f203f34/volumes" Nov 25 10:07:52 crc kubenswrapper[4769]: I1125 10:07:52.359375 4769 generic.go:334] "Generic (PLEG): container finished" podID="4ea2eb46-81be-409a-9f6b-8775f5458372" containerID="5c0bff317d73f056f37cf0fcc331317bdf7c0cfb69ae4a456b42915a028ac8b0" exitCode=143 Nov 25 10:07:52 crc kubenswrapper[4769]: I1125 10:07:52.359453 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"4ea2eb46-81be-409a-9f6b-8775f5458372","Type":"ContainerDied","Data":"5c0bff317d73f056f37cf0fcc331317bdf7c0cfb69ae4a456b42915a028ac8b0"} Nov 25 10:07:52 crc kubenswrapper[4769]: I1125 10:07:52.363667 4769 generic.go:334] "Generic (PLEG): container finished" podID="96b52ba9-e2dc-4a75-8001-b2ce075634e7" containerID="8093caaf525c4c96d62dc6649237a6df0b7d322ea1f4d46279bc805fa3c12362" exitCode=1 Nov 25 10:07:52 crc kubenswrapper[4769]: I1125 10:07:52.363716 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-59d8894fc7-94vz2" event={"ID":"96b52ba9-e2dc-4a75-8001-b2ce075634e7","Type":"ContainerDied","Data":"8093caaf525c4c96d62dc6649237a6df0b7d322ea1f4d46279bc805fa3c12362"} Nov 25 10:07:52 crc kubenswrapper[4769]: I1125 10:07:52.363750 4769 scope.go:117] "RemoveContainer" containerID="c2eed78b57a0fe8cd9146ae55a904007cc9dec09e780476e1ce0912f3ccf6fce" Nov 25 10:07:52 crc kubenswrapper[4769]: I1125 10:07:52.364790 4769 scope.go:117] "RemoveContainer" containerID="8093caaf525c4c96d62dc6649237a6df0b7d322ea1f4d46279bc805fa3c12362" Nov 25 10:07:52 crc kubenswrapper[4769]: E1125 10:07:52.365086 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-59d8894fc7-94vz2_openstack(96b52ba9-e2dc-4a75-8001-b2ce075634e7)\"" pod="openstack/heat-api-59d8894fc7-94vz2" podUID="96b52ba9-e2dc-4a75-8001-b2ce075634e7" Nov 25 10:07:52 crc kubenswrapper[4769]: I1125 10:07:52.370269 4769 generic.go:334] "Generic (PLEG): container finished" podID="16fdfb26-f9e8-40be-88fd-341faa115238" containerID="9177dbd875a9c38ad4ea9bdbd80eb53eecb6c8629e70060bc18969722cd5be23" exitCode=1 Nov 25 10:07:52 crc kubenswrapper[4769]: I1125 10:07:52.372853 4769 scope.go:117] "RemoveContainer" containerID="9177dbd875a9c38ad4ea9bdbd80eb53eecb6c8629e70060bc18969722cd5be23" Nov 25 10:07:52 crc kubenswrapper[4769]: I1125 10:07:52.373086 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-78c5b49c8c-v7f8v" event={"ID":"16fdfb26-f9e8-40be-88fd-341faa115238","Type":"ContainerDied","Data":"9177dbd875a9c38ad4ea9bdbd80eb53eecb6c8629e70060bc18969722cd5be23"} Nov 25 10:07:52 crc kubenswrapper[4769]: E1125 10:07:52.373106 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi 
pod=heat-cfnapi-78c5b49c8c-v7f8v_openstack(16fdfb26-f9e8-40be-88fd-341faa115238)\"" pod="openstack/heat-cfnapi-78c5b49c8c-v7f8v" podUID="16fdfb26-f9e8-40be-88fd-341faa115238" Nov 25 10:07:52 crc kubenswrapper[4769]: I1125 10:07:52.466486 4769 scope.go:117] "RemoveContainer" containerID="22baa5f349a41c3f9c3684c415d5a36776a43a78c4636f02fb60679678b5cc13" Nov 25 10:07:52 crc kubenswrapper[4769]: I1125 10:07:52.517186 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xng9l"] Nov 25 10:07:53 crc kubenswrapper[4769]: I1125 10:07:53.388847 4769 scope.go:117] "RemoveContainer" containerID="9177dbd875a9c38ad4ea9bdbd80eb53eecb6c8629e70060bc18969722cd5be23" Nov 25 10:07:53 crc kubenswrapper[4769]: E1125 10:07:53.389688 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-78c5b49c8c-v7f8v_openstack(16fdfb26-f9e8-40be-88fd-341faa115238)\"" pod="openstack/heat-cfnapi-78c5b49c8c-v7f8v" podUID="16fdfb26-f9e8-40be-88fd-341faa115238" Nov 25 10:07:53 crc kubenswrapper[4769]: I1125 10:07:53.395928 4769 generic.go:334] "Generic (PLEG): container finished" podID="390e02db-45a5-4498-92a6-edde620d04b8" containerID="5402e38ca05b40ed35df466acc448dd61cc735d8b3d25685b691923950460033" exitCode=0 Nov 25 10:07:53 crc kubenswrapper[4769]: I1125 10:07:53.396025 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"390e02db-45a5-4498-92a6-edde620d04b8","Type":"ContainerDied","Data":"5402e38ca05b40ed35df466acc448dd61cc735d8b3d25685b691923950460033"} Nov 25 10:07:53 crc kubenswrapper[4769]: I1125 10:07:53.408734 4769 scope.go:117] "RemoveContainer" containerID="8093caaf525c4c96d62dc6649237a6df0b7d322ea1f4d46279bc805fa3c12362" Nov 25 10:07:53 crc kubenswrapper[4769]: E1125 10:07:53.409010 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-59d8894fc7-94vz2_openstack(96b52ba9-e2dc-4a75-8001-b2ce075634e7)\"" pod="openstack/heat-api-59d8894fc7-94vz2" podUID="96b52ba9-e2dc-4a75-8001-b2ce075634e7" Nov 25 10:07:53 crc kubenswrapper[4769]: I1125 10:07:53.419536 4769 generic.go:334] "Generic (PLEG): container finished" podID="922fd841-fddc-4178-ba91-d3b0d45826a8" containerID="496d2b64547a1021ab173ee2c375f9f68423abfe0918e8077377ceb4b2954ffe" exitCode=0 Nov 25 10:07:53 crc kubenswrapper[4769]: I1125 10:07:53.419658 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xng9l" event={"ID":"922fd841-fddc-4178-ba91-d3b0d45826a8","Type":"ContainerDied","Data":"496d2b64547a1021ab173ee2c375f9f68423abfe0918e8077377ceb4b2954ffe"} Nov 25 10:07:53 crc kubenswrapper[4769]: I1125 10:07:53.419701 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xng9l" event={"ID":"922fd841-fddc-4178-ba91-d3b0d45826a8","Type":"ContainerStarted","Data":"5c14b3313e615835173fd5e59af260fd3190b5559db1f8f7bf299963f41a1ad4"} Nov 25 10:07:53 crc kubenswrapper[4769]: I1125 10:07:53.904110 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.062887 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/390e02db-45a5-4498-92a6-edde620d04b8-sg-core-conf-yaml\") pod \"390e02db-45a5-4498-92a6-edde620d04b8\" (UID: \"390e02db-45a5-4498-92a6-edde620d04b8\") " Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.062938 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/390e02db-45a5-4498-92a6-edde620d04b8-scripts\") pod \"390e02db-45a5-4498-92a6-edde620d04b8\" (UID: \"390e02db-45a5-4498-92a6-edde620d04b8\") " Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.062991 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b5ntv\" (UniqueName: \"kubernetes.io/projected/390e02db-45a5-4498-92a6-edde620d04b8-kube-api-access-b5ntv\") pod \"390e02db-45a5-4498-92a6-edde620d04b8\" (UID: \"390e02db-45a5-4498-92a6-edde620d04b8\") " Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.063072 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/390e02db-45a5-4498-92a6-edde620d04b8-config-data\") pod \"390e02db-45a5-4498-92a6-edde620d04b8\" (UID: \"390e02db-45a5-4498-92a6-edde620d04b8\") " Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.063118 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/390e02db-45a5-4498-92a6-edde620d04b8-log-httpd\") pod \"390e02db-45a5-4498-92a6-edde620d04b8\" (UID: \"390e02db-45a5-4498-92a6-edde620d04b8\") " Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.063225 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/390e02db-45a5-4498-92a6-edde620d04b8-run-httpd\") pod \"390e02db-45a5-4498-92a6-edde620d04b8\" (UID: \"390e02db-45a5-4498-92a6-edde620d04b8\") " Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.063258 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/390e02db-45a5-4498-92a6-edde620d04b8-combined-ca-bundle\") pod \"390e02db-45a5-4498-92a6-edde620d04b8\" (UID: \"390e02db-45a5-4498-92a6-edde620d04b8\") " Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.065939 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/390e02db-45a5-4498-92a6-edde620d04b8-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "390e02db-45a5-4498-92a6-edde620d04b8" (UID: "390e02db-45a5-4498-92a6-edde620d04b8"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.068106 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/390e02db-45a5-4498-92a6-edde620d04b8-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "390e02db-45a5-4498-92a6-edde620d04b8" (UID: "390e02db-45a5-4498-92a6-edde620d04b8"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.080588 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/390e02db-45a5-4498-92a6-edde620d04b8-scripts" (OuterVolumeSpecName: "scripts") pod "390e02db-45a5-4498-92a6-edde620d04b8" (UID: "390e02db-45a5-4498-92a6-edde620d04b8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.104551 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/390e02db-45a5-4498-92a6-edde620d04b8-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "390e02db-45a5-4498-92a6-edde620d04b8" (UID: "390e02db-45a5-4498-92a6-edde620d04b8"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.119623 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/390e02db-45a5-4498-92a6-edde620d04b8-kube-api-access-b5ntv" (OuterVolumeSpecName: "kube-api-access-b5ntv") pod "390e02db-45a5-4498-92a6-edde620d04b8" (UID: "390e02db-45a5-4498-92a6-edde620d04b8"). InnerVolumeSpecName "kube-api-access-b5ntv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.166410 4769 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/390e02db-45a5-4498-92a6-edde620d04b8-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.166446 4769 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/390e02db-45a5-4498-92a6-edde620d04b8-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.166459 4769 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/390e02db-45a5-4498-92a6-edde620d04b8-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.166468 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b5ntv\" (UniqueName: \"kubernetes.io/projected/390e02db-45a5-4498-92a6-edde620d04b8-kube-api-access-b5ntv\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.166477 4769 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/390e02db-45a5-4498-92a6-edde620d04b8-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.178142 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/390e02db-45a5-4498-92a6-edde620d04b8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "390e02db-45a5-4498-92a6-edde620d04b8" (UID: "390e02db-45a5-4498-92a6-edde620d04b8"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.268689 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/390e02db-45a5-4498-92a6-edde620d04b8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.382506 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-59d8894fc7-94vz2" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.382582 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-api-59d8894fc7-94vz2" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.389223 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/390e02db-45a5-4498-92a6-edde620d04b8-config-data" (OuterVolumeSpecName: "config-data") pod "390e02db-45a5-4498-92a6-edde620d04b8" (UID: "390e02db-45a5-4498-92a6-edde620d04b8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.435942 4769 generic.go:334] "Generic (PLEG): container finished" podID="4ea324d5-69e1-4f0b-982f-15b0de3ec539" containerID="b29fbee263efb754baf1bfbecd2cbb37e73412708dcac6dfb72529016552877d" exitCode=0 Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.436021 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4ea324d5-69e1-4f0b-982f-15b0de3ec539","Type":"ContainerDied","Data":"b29fbee263efb754baf1bfbecd2cbb37e73412708dcac6dfb72529016552877d"} Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.436138 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4ea324d5-69e1-4f0b-982f-15b0de3ec539","Type":"ContainerDied","Data":"b8f07bddbf31933390286a4f7ebde0469d938f43e192602e29eb9d14a2e844c7"} Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.436167 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b8f07bddbf31933390286a4f7ebde0469d938f43e192602e29eb9d14a2e844c7" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.440261 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.440425 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"390e02db-45a5-4498-92a6-edde620d04b8","Type":"ContainerDied","Data":"a162ac9d6f394e7837aff3301a7ba95d6e552b434ff0e3d7f4372cc5e43460f3"} Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.440485 4769 scope.go:117] "RemoveContainer" containerID="a4970afad4f8fa54c587e3ee3430eb3d1a6998c7f1b8b189880458509f4b76d6" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.442179 4769 scope.go:117] "RemoveContainer" containerID="8093caaf525c4c96d62dc6649237a6df0b7d322ea1f4d46279bc805fa3c12362" Nov 25 10:07:54 crc kubenswrapper[4769]: E1125 10:07:54.442665 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-59d8894fc7-94vz2_openstack(96b52ba9-e2dc-4a75-8001-b2ce075634e7)\"" pod="openstack/heat-api-59d8894fc7-94vz2" podUID="96b52ba9-e2dc-4a75-8001-b2ce075634e7" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.443720 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-cfnapi-78c5b49c8c-v7f8v" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.444544 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-78c5b49c8c-v7f8v" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.445200 4769 scope.go:117] "RemoveContainer" containerID="9177dbd875a9c38ad4ea9bdbd80eb53eecb6c8629e70060bc18969722cd5be23" Nov 25 10:07:54 crc kubenswrapper[4769]: E1125 10:07:54.445479 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-78c5b49c8c-v7f8v_openstack(16fdfb26-f9e8-40be-88fd-341faa115238)\"" pod="openstack/heat-cfnapi-78c5b49c8c-v7f8v" podUID="16fdfb26-f9e8-40be-88fd-341faa115238" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.471508 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.487657 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/390e02db-45a5-4498-92a6-edde620d04b8-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.495089 4769 scope.go:117] "RemoveContainer" containerID="fd59667432f8fa5df9a1393340b3850752dce7da60861f78df9d677ccf87a328" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.512021 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.539975 4769 scope.go:117] "RemoveContainer" containerID="5402e38ca05b40ed35df466acc448dd61cc735d8b3d25685b691923950460033" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.564397 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.564491 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:07:54 crc kubenswrapper[4769]: E1125 10:07:54.565237 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="390e02db-45a5-4498-92a6-edde620d04b8" containerName="sg-core" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.565257 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="390e02db-45a5-4498-92a6-edde620d04b8" containerName="sg-core" Nov 25 10:07:54 crc kubenswrapper[4769]: E1125 10:07:54.565272 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="390e02db-45a5-4498-92a6-edde620d04b8" containerName="proxy-httpd" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.565284 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="390e02db-45a5-4498-92a6-edde620d04b8" containerName="proxy-httpd" Nov 25 10:07:54 crc kubenswrapper[4769]: E1125 10:07:54.565305 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ea324d5-69e1-4f0b-982f-15b0de3ec539" containerName="glance-log" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.565311 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ea324d5-69e1-4f0b-982f-15b0de3ec539" containerName="glance-log" Nov 25 10:07:54 crc kubenswrapper[4769]: E1125 10:07:54.565329 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="390e02db-45a5-4498-92a6-edde620d04b8" containerName="ceilometer-central-agent" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.565335 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="390e02db-45a5-4498-92a6-edde620d04b8" containerName="ceilometer-central-agent" Nov 25 10:07:54 crc kubenswrapper[4769]: E1125 10:07:54.565352 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="390e02db-45a5-4498-92a6-edde620d04b8" containerName="ceilometer-notification-agent" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.565359 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="390e02db-45a5-4498-92a6-edde620d04b8" containerName="ceilometer-notification-agent" Nov 25 10:07:54 crc kubenswrapper[4769]: E1125 10:07:54.565377 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ea324d5-69e1-4f0b-982f-15b0de3ec539" containerName="glance-httpd" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.565382 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ea324d5-69e1-4f0b-982f-15b0de3ec539" containerName="glance-httpd" Nov 25 10:07:54 crc 
kubenswrapper[4769]: I1125 10:07:54.565630 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="390e02db-45a5-4498-92a6-edde620d04b8" containerName="ceilometer-notification-agent" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.565663 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ea324d5-69e1-4f0b-982f-15b0de3ec539" containerName="glance-log" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.565686 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="390e02db-45a5-4498-92a6-edde620d04b8" containerName="proxy-httpd" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.565693 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="390e02db-45a5-4498-92a6-edde620d04b8" containerName="sg-core" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.565704 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ea324d5-69e1-4f0b-982f-15b0de3ec539" containerName="glance-httpd" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.565716 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="390e02db-45a5-4498-92a6-edde620d04b8" containerName="ceilometer-central-agent" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.573569 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.578290 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.578524 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.589048 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ea324d5-69e1-4f0b-982f-15b0de3ec539-combined-ca-bundle\") pod \"4ea324d5-69e1-4f0b-982f-15b0de3ec539\" (UID: \"4ea324d5-69e1-4f0b-982f-15b0de3ec539\") " Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.589106 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4ea324d5-69e1-4f0b-982f-15b0de3ec539-httpd-run\") pod \"4ea324d5-69e1-4f0b-982f-15b0de3ec539\" (UID: \"4ea324d5-69e1-4f0b-982f-15b0de3ec539\") " Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.589265 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4ea324d5-69e1-4f0b-982f-15b0de3ec539-scripts\") pod \"4ea324d5-69e1-4f0b-982f-15b0de3ec539\" (UID: \"4ea324d5-69e1-4f0b-982f-15b0de3ec539\") " Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.589308 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4ea324d5-69e1-4f0b-982f-15b0de3ec539-logs\") pod \"4ea324d5-69e1-4f0b-982f-15b0de3ec539\" (UID: \"4ea324d5-69e1-4f0b-982f-15b0de3ec539\") " Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.589767 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"4ea324d5-69e1-4f0b-982f-15b0de3ec539\" (UID: \"4ea324d5-69e1-4f0b-982f-15b0de3ec539\") " Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.589882 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"kube-api-access-c92jq\" (UniqueName: \"kubernetes.io/projected/4ea324d5-69e1-4f0b-982f-15b0de3ec539-kube-api-access-c92jq\") pod \"4ea324d5-69e1-4f0b-982f-15b0de3ec539\" (UID: \"4ea324d5-69e1-4f0b-982f-15b0de3ec539\") " Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.589918 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4ea324d5-69e1-4f0b-982f-15b0de3ec539-public-tls-certs\") pod \"4ea324d5-69e1-4f0b-982f-15b0de3ec539\" (UID: \"4ea324d5-69e1-4f0b-982f-15b0de3ec539\") " Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.589940 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ea324d5-69e1-4f0b-982f-15b0de3ec539-config-data\") pod \"4ea324d5-69e1-4f0b-982f-15b0de3ec539\" (UID: \"4ea324d5-69e1-4f0b-982f-15b0de3ec539\") " Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.590722 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4ea324d5-69e1-4f0b-982f-15b0de3ec539-logs" (OuterVolumeSpecName: "logs") pod "4ea324d5-69e1-4f0b-982f-15b0de3ec539" (UID: "4ea324d5-69e1-4f0b-982f-15b0de3ec539"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.590903 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4ea324d5-69e1-4f0b-982f-15b0de3ec539-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "4ea324d5-69e1-4f0b-982f-15b0de3ec539" (UID: "4ea324d5-69e1-4f0b-982f-15b0de3ec539"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.595824 4769 scope.go:117] "RemoveContainer" containerID="5dcfedb7884471be39bca6018fa56d4485b12af1417d0c22a393117af309dbd2" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.603190 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4ea324d5-69e1-4f0b-982f-15b0de3ec539-kube-api-access-c92jq" (OuterVolumeSpecName: "kube-api-access-c92jq") pod "4ea324d5-69e1-4f0b-982f-15b0de3ec539" (UID: "4ea324d5-69e1-4f0b-982f-15b0de3ec539"). InnerVolumeSpecName "kube-api-access-c92jq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.624303 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ea324d5-69e1-4f0b-982f-15b0de3ec539-scripts" (OuterVolumeSpecName: "scripts") pod "4ea324d5-69e1-4f0b-982f-15b0de3ec539" (UID: "4ea324d5-69e1-4f0b-982f-15b0de3ec539"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.624825 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.654134 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "glance") pod "4ea324d5-69e1-4f0b-982f-15b0de3ec539" (UID: "4ea324d5-69e1-4f0b-982f-15b0de3ec539"). InnerVolumeSpecName "local-storage07-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.692512 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0fdc6e58-5fd1-4801-b734-a47fc74c7bac-config-data\") pod \"ceilometer-0\" (UID: \"0fdc6e58-5fd1-4801-b734-a47fc74c7bac\") " pod="openstack/ceilometer-0" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.692575 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0fdc6e58-5fd1-4801-b734-a47fc74c7bac-run-httpd\") pod \"ceilometer-0\" (UID: \"0fdc6e58-5fd1-4801-b734-a47fc74c7bac\") " pod="openstack/ceilometer-0" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.692640 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bb4sw\" (UniqueName: \"kubernetes.io/projected/0fdc6e58-5fd1-4801-b734-a47fc74c7bac-kube-api-access-bb4sw\") pod \"ceilometer-0\" (UID: \"0fdc6e58-5fd1-4801-b734-a47fc74c7bac\") " pod="openstack/ceilometer-0" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.692673 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0fdc6e58-5fd1-4801-b734-a47fc74c7bac-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0fdc6e58-5fd1-4801-b734-a47fc74c7bac\") " pod="openstack/ceilometer-0" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.692696 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0fdc6e58-5fd1-4801-b734-a47fc74c7bac-log-httpd\") pod \"ceilometer-0\" (UID: \"0fdc6e58-5fd1-4801-b734-a47fc74c7bac\") " pod="openstack/ceilometer-0" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.692718 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0fdc6e58-5fd1-4801-b734-a47fc74c7bac-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0fdc6e58-5fd1-4801-b734-a47fc74c7bac\") " pod="openstack/ceilometer-0" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.692808 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0fdc6e58-5fd1-4801-b734-a47fc74c7bac-scripts\") pod \"ceilometer-0\" (UID: \"0fdc6e58-5fd1-4801-b734-a47fc74c7bac\") " pod="openstack/ceilometer-0" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.692875 4769 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4ea324d5-69e1-4f0b-982f-15b0de3ec539-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.692886 4769 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4ea324d5-69e1-4f0b-982f-15b0de3ec539-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.692895 4769 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4ea324d5-69e1-4f0b-982f-15b0de3ec539-logs\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.692917 4769 reconciler_common.go:286] "operationExecutor.UnmountDevice 
started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" " Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.692926 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c92jq\" (UniqueName: \"kubernetes.io/projected/4ea324d5-69e1-4f0b-982f-15b0de3ec539-kube-api-access-c92jq\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.696085 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ea324d5-69e1-4f0b-982f-15b0de3ec539-config-data" (OuterVolumeSpecName: "config-data") pod "4ea324d5-69e1-4f0b-982f-15b0de3ec539" (UID: "4ea324d5-69e1-4f0b-982f-15b0de3ec539"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.727599 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ea324d5-69e1-4f0b-982f-15b0de3ec539-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "4ea324d5-69e1-4f0b-982f-15b0de3ec539" (UID: "4ea324d5-69e1-4f0b-982f-15b0de3ec539"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.739344 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ea324d5-69e1-4f0b-982f-15b0de3ec539-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4ea324d5-69e1-4f0b-982f-15b0de3ec539" (UID: "4ea324d5-69e1-4f0b-982f-15b0de3ec539"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.742105 4769 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.795668 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0fdc6e58-5fd1-4801-b734-a47fc74c7bac-scripts\") pod \"ceilometer-0\" (UID: \"0fdc6e58-5fd1-4801-b734-a47fc74c7bac\") " pod="openstack/ceilometer-0" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.795769 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0fdc6e58-5fd1-4801-b734-a47fc74c7bac-config-data\") pod \"ceilometer-0\" (UID: \"0fdc6e58-5fd1-4801-b734-a47fc74c7bac\") " pod="openstack/ceilometer-0" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.795808 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0fdc6e58-5fd1-4801-b734-a47fc74c7bac-run-httpd\") pod \"ceilometer-0\" (UID: \"0fdc6e58-5fd1-4801-b734-a47fc74c7bac\") " pod="openstack/ceilometer-0" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.795866 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bb4sw\" (UniqueName: \"kubernetes.io/projected/0fdc6e58-5fd1-4801-b734-a47fc74c7bac-kube-api-access-bb4sw\") pod \"ceilometer-0\" (UID: \"0fdc6e58-5fd1-4801-b734-a47fc74c7bac\") " pod="openstack/ceilometer-0" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.795902 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/0fdc6e58-5fd1-4801-b734-a47fc74c7bac-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0fdc6e58-5fd1-4801-b734-a47fc74c7bac\") " pod="openstack/ceilometer-0" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.795924 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0fdc6e58-5fd1-4801-b734-a47fc74c7bac-log-httpd\") pod \"ceilometer-0\" (UID: \"0fdc6e58-5fd1-4801-b734-a47fc74c7bac\") " pod="openstack/ceilometer-0" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.795942 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0fdc6e58-5fd1-4801-b734-a47fc74c7bac-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0fdc6e58-5fd1-4801-b734-a47fc74c7bac\") " pod="openstack/ceilometer-0" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.796064 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ea324d5-69e1-4f0b-982f-15b0de3ec539-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.796093 4769 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.796102 4769 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4ea324d5-69e1-4f0b-982f-15b0de3ec539-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.796113 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ea324d5-69e1-4f0b-982f-15b0de3ec539-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.797298 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0fdc6e58-5fd1-4801-b734-a47fc74c7bac-run-httpd\") pod \"ceilometer-0\" (UID: \"0fdc6e58-5fd1-4801-b734-a47fc74c7bac\") " pod="openstack/ceilometer-0" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.797391 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0fdc6e58-5fd1-4801-b734-a47fc74c7bac-log-httpd\") pod \"ceilometer-0\" (UID: \"0fdc6e58-5fd1-4801-b734-a47fc74c7bac\") " pod="openstack/ceilometer-0" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.809308 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0fdc6e58-5fd1-4801-b734-a47fc74c7bac-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0fdc6e58-5fd1-4801-b734-a47fc74c7bac\") " pod="openstack/ceilometer-0" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.810939 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0fdc6e58-5fd1-4801-b734-a47fc74c7bac-scripts\") pod \"ceilometer-0\" (UID: \"0fdc6e58-5fd1-4801-b734-a47fc74c7bac\") " pod="openstack/ceilometer-0" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.811259 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0fdc6e58-5fd1-4801-b734-a47fc74c7bac-config-data\") pod 
\"ceilometer-0\" (UID: \"0fdc6e58-5fd1-4801-b734-a47fc74c7bac\") " pod="openstack/ceilometer-0" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.811746 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0fdc6e58-5fd1-4801-b734-a47fc74c7bac-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0fdc6e58-5fd1-4801-b734-a47fc74c7bac\") " pod="openstack/ceilometer-0" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.834863 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bb4sw\" (UniqueName: \"kubernetes.io/projected/0fdc6e58-5fd1-4801-b734-a47fc74c7bac-kube-api-access-bb4sw\") pod \"ceilometer-0\" (UID: \"0fdc6e58-5fd1-4801-b734-a47fc74c7bac\") " pod="openstack/ceilometer-0" Nov 25 10:07:54 crc kubenswrapper[4769]: I1125 10:07:54.902552 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.484656 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xng9l" event={"ID":"922fd841-fddc-4178-ba91-d3b0d45826a8","Type":"ContainerStarted","Data":"d74f157ebaac55cacb7b0a73bf061a43eb3e5715ac2d9e69c7ae53710777c361"} Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.488303 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.520468 4769 generic.go:334] "Generic (PLEG): container finished" podID="4ea2eb46-81be-409a-9f6b-8775f5458372" containerID="324d6d2418fce58a565ec648cf71a995ff8b4854821799494437c7cf85e2042c" exitCode=0 Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.520578 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"4ea2eb46-81be-409a-9f6b-8775f5458372","Type":"ContainerDied","Data":"324d6d2418fce58a565ec648cf71a995ff8b4854821799494437c7cf85e2042c"} Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.522732 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.524695 4769 scope.go:117] "RemoveContainer" containerID="9177dbd875a9c38ad4ea9bdbd80eb53eecb6c8629e70060bc18969722cd5be23" Nov 25 10:07:55 crc kubenswrapper[4769]: E1125 10:07:55.524934 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-78c5b49c8c-v7f8v_openstack(16fdfb26-f9e8-40be-88fd-341faa115238)\"" pod="openstack/heat-cfnapi-78c5b49c8c-v7f8v" podUID="16fdfb26-f9e8-40be-88fd-341faa115238" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.636191 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.659328 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.677308 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.709042 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 10:07:55 crc kubenswrapper[4769]: E1125 10:07:55.709744 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ea2eb46-81be-409a-9f6b-8775f5458372" containerName="glance-log" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.709766 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ea2eb46-81be-409a-9f6b-8775f5458372" containerName="glance-log" Nov 25 10:07:55 crc kubenswrapper[4769]: E1125 10:07:55.709799 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ea2eb46-81be-409a-9f6b-8775f5458372" containerName="glance-httpd" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.709808 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ea2eb46-81be-409a-9f6b-8775f5458372" containerName="glance-httpd" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.710074 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ea2eb46-81be-409a-9f6b-8775f5458372" containerName="glance-httpd" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.710097 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ea2eb46-81be-409a-9f6b-8775f5458372" containerName="glance-log" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.711462 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.713766 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.714239 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.723579 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"4ea2eb46-81be-409a-9f6b-8775f5458372\" (UID: \"4ea2eb46-81be-409a-9f6b-8775f5458372\") " Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.723745 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4ea2eb46-81be-409a-9f6b-8775f5458372-httpd-run\") pod \"4ea2eb46-81be-409a-9f6b-8775f5458372\" (UID: \"4ea2eb46-81be-409a-9f6b-8775f5458372\") " Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.723818 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ea2eb46-81be-409a-9f6b-8775f5458372-combined-ca-bundle\") pod \"4ea2eb46-81be-409a-9f6b-8775f5458372\" (UID: \"4ea2eb46-81be-409a-9f6b-8775f5458372\") " Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.723984 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4ea2eb46-81be-409a-9f6b-8775f5458372-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "4ea2eb46-81be-409a-9f6b-8775f5458372" (UID: "4ea2eb46-81be-409a-9f6b-8775f5458372"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.724084 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ea2eb46-81be-409a-9f6b-8775f5458372-config-data\") pod \"4ea2eb46-81be-409a-9f6b-8775f5458372\" (UID: \"4ea2eb46-81be-409a-9f6b-8775f5458372\") " Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.724224 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4ea2eb46-81be-409a-9f6b-8775f5458372-scripts\") pod \"4ea2eb46-81be-409a-9f6b-8775f5458372\" (UID: \"4ea2eb46-81be-409a-9f6b-8775f5458372\") " Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.724311 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7gfn6\" (UniqueName: \"kubernetes.io/projected/4ea2eb46-81be-409a-9f6b-8775f5458372-kube-api-access-7gfn6\") pod \"4ea2eb46-81be-409a-9f6b-8775f5458372\" (UID: \"4ea2eb46-81be-409a-9f6b-8775f5458372\") " Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.724347 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4ea2eb46-81be-409a-9f6b-8775f5458372-internal-tls-certs\") pod \"4ea2eb46-81be-409a-9f6b-8775f5458372\" (UID: \"4ea2eb46-81be-409a-9f6b-8775f5458372\") " Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.724430 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4ea2eb46-81be-409a-9f6b-8775f5458372-logs\") pod \"4ea2eb46-81be-409a-9f6b-8775f5458372\" (UID: \"4ea2eb46-81be-409a-9f6b-8775f5458372\") " Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.726148 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4ea2eb46-81be-409a-9f6b-8775f5458372-logs" (OuterVolumeSpecName: "logs") pod "4ea2eb46-81be-409a-9f6b-8775f5458372" (UID: "4ea2eb46-81be-409a-9f6b-8775f5458372"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.727288 4769 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4ea2eb46-81be-409a-9f6b-8775f5458372-logs\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.727310 4769 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4ea2eb46-81be-409a-9f6b-8775f5458372-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.772078 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.774749 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4ea2eb46-81be-409a-9f6b-8775f5458372-kube-api-access-7gfn6" (OuterVolumeSpecName: "kube-api-access-7gfn6") pod "4ea2eb46-81be-409a-9f6b-8775f5458372" (UID: "4ea2eb46-81be-409a-9f6b-8775f5458372"). InnerVolumeSpecName "kube-api-access-7gfn6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.774773 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ea2eb46-81be-409a-9f6b-8775f5458372-scripts" (OuterVolumeSpecName: "scripts") pod "4ea2eb46-81be-409a-9f6b-8775f5458372" (UID: "4ea2eb46-81be-409a-9f6b-8775f5458372"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.775661 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "glance") pod "4ea2eb46-81be-409a-9f6b-8775f5458372" (UID: "4ea2eb46-81be-409a-9f6b-8775f5458372"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.829443 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4dc076f-e68c-4c7e-ae16-f8576de44f48-config-data\") pod \"glance-default-external-api-0\" (UID: \"e4dc076f-e68c-4c7e-ae16-f8576de44f48\") " pod="openstack/glance-default-external-api-0" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.829501 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4dc076f-e68c-4c7e-ae16-f8576de44f48-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"e4dc076f-e68c-4c7e-ae16-f8576de44f48\") " pod="openstack/glance-default-external-api-0" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.829522 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e4dc076f-e68c-4c7e-ae16-f8576de44f48-logs\") pod \"glance-default-external-api-0\" (UID: \"e4dc076f-e68c-4c7e-ae16-f8576de44f48\") " pod="openstack/glance-default-external-api-0" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.829615 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"e4dc076f-e68c-4c7e-ae16-f8576de44f48\") " pod="openstack/glance-default-external-api-0" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.829652 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e4dc076f-e68c-4c7e-ae16-f8576de44f48-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"e4dc076f-e68c-4c7e-ae16-f8576de44f48\") " pod="openstack/glance-default-external-api-0" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.829692 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e4dc076f-e68c-4c7e-ae16-f8576de44f48-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"e4dc076f-e68c-4c7e-ae16-f8576de44f48\") " pod="openstack/glance-default-external-api-0" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.829752 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fdz26\" (UniqueName: 
\"kubernetes.io/projected/e4dc076f-e68c-4c7e-ae16-f8576de44f48-kube-api-access-fdz26\") pod \"glance-default-external-api-0\" (UID: \"e4dc076f-e68c-4c7e-ae16-f8576de44f48\") " pod="openstack/glance-default-external-api-0" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.829779 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e4dc076f-e68c-4c7e-ae16-f8576de44f48-scripts\") pod \"glance-default-external-api-0\" (UID: \"e4dc076f-e68c-4c7e-ae16-f8576de44f48\") " pod="openstack/glance-default-external-api-0" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.829848 4769 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4ea2eb46-81be-409a-9f6b-8775f5458372-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.829860 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7gfn6\" (UniqueName: \"kubernetes.io/projected/4ea2eb46-81be-409a-9f6b-8775f5458372-kube-api-access-7gfn6\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.829880 4769 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.835479 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ea2eb46-81be-409a-9f6b-8775f5458372-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4ea2eb46-81be-409a-9f6b-8775f5458372" (UID: "4ea2eb46-81be-409a-9f6b-8775f5458372"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.880055 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ea2eb46-81be-409a-9f6b-8775f5458372-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "4ea2eb46-81be-409a-9f6b-8775f5458372" (UID: "4ea2eb46-81be-409a-9f6b-8775f5458372"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.900900 4769 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.905100 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ea2eb46-81be-409a-9f6b-8775f5458372-config-data" (OuterVolumeSpecName: "config-data") pod "4ea2eb46-81be-409a-9f6b-8775f5458372" (UID: "4ea2eb46-81be-409a-9f6b-8775f5458372"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.932095 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"e4dc076f-e68c-4c7e-ae16-f8576de44f48\") " pod="openstack/glance-default-external-api-0" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.932179 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e4dc076f-e68c-4c7e-ae16-f8576de44f48-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"e4dc076f-e68c-4c7e-ae16-f8576de44f48\") " pod="openstack/glance-default-external-api-0" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.932267 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e4dc076f-e68c-4c7e-ae16-f8576de44f48-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"e4dc076f-e68c-4c7e-ae16-f8576de44f48\") " pod="openstack/glance-default-external-api-0" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.932347 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fdz26\" (UniqueName: \"kubernetes.io/projected/e4dc076f-e68c-4c7e-ae16-f8576de44f48-kube-api-access-fdz26\") pod \"glance-default-external-api-0\" (UID: \"e4dc076f-e68c-4c7e-ae16-f8576de44f48\") " pod="openstack/glance-default-external-api-0" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.932390 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e4dc076f-e68c-4c7e-ae16-f8576de44f48-scripts\") pod \"glance-default-external-api-0\" (UID: \"e4dc076f-e68c-4c7e-ae16-f8576de44f48\") " pod="openstack/glance-default-external-api-0" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.932450 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4dc076f-e68c-4c7e-ae16-f8576de44f48-config-data\") pod \"glance-default-external-api-0\" (UID: \"e4dc076f-e68c-4c7e-ae16-f8576de44f48\") " pod="openstack/glance-default-external-api-0" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.932490 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4dc076f-e68c-4c7e-ae16-f8576de44f48-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"e4dc076f-e68c-4c7e-ae16-f8576de44f48\") " pod="openstack/glance-default-external-api-0" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.932514 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e4dc076f-e68c-4c7e-ae16-f8576de44f48-logs\") pod \"glance-default-external-api-0\" (UID: \"e4dc076f-e68c-4c7e-ae16-f8576de44f48\") " pod="openstack/glance-default-external-api-0" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.932629 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ea2eb46-81be-409a-9f6b-8775f5458372-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.932647 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/4ea2eb46-81be-409a-9f6b-8775f5458372-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.932661 4769 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4ea2eb46-81be-409a-9f6b-8775f5458372-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.932674 4769 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.933188 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e4dc076f-e68c-4c7e-ae16-f8576de44f48-logs\") pod \"glance-default-external-api-0\" (UID: \"e4dc076f-e68c-4c7e-ae16-f8576de44f48\") " pod="openstack/glance-default-external-api-0" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.933494 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e4dc076f-e68c-4c7e-ae16-f8576de44f48-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"e4dc076f-e68c-4c7e-ae16-f8576de44f48\") " pod="openstack/glance-default-external-api-0" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.933496 4769 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"e4dc076f-e68c-4c7e-ae16-f8576de44f48\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/glance-default-external-api-0" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.937598 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e4dc076f-e68c-4c7e-ae16-f8576de44f48-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"e4dc076f-e68c-4c7e-ae16-f8576de44f48\") " pod="openstack/glance-default-external-api-0" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.939742 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e4dc076f-e68c-4c7e-ae16-f8576de44f48-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"e4dc076f-e68c-4c7e-ae16-f8576de44f48\") " pod="openstack/glance-default-external-api-0" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.941565 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e4dc076f-e68c-4c7e-ae16-f8576de44f48-config-data\") pod \"glance-default-external-api-0\" (UID: \"e4dc076f-e68c-4c7e-ae16-f8576de44f48\") " pod="openstack/glance-default-external-api-0" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.945355 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e4dc076f-e68c-4c7e-ae16-f8576de44f48-scripts\") pod \"glance-default-external-api-0\" (UID: \"e4dc076f-e68c-4c7e-ae16-f8576de44f48\") " pod="openstack/glance-default-external-api-0" Nov 25 10:07:55 crc kubenswrapper[4769]: I1125 10:07:55.960414 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fdz26\" (UniqueName: \"kubernetes.io/projected/e4dc076f-e68c-4c7e-ae16-f8576de44f48-kube-api-access-fdz26\") pod 
\"glance-default-external-api-0\" (UID: \"e4dc076f-e68c-4c7e-ae16-f8576de44f48\") " pod="openstack/glance-default-external-api-0" Nov 25 10:07:56 crc kubenswrapper[4769]: I1125 10:07:56.002848 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-external-api-0\" (UID: \"e4dc076f-e68c-4c7e-ae16-f8576de44f48\") " pod="openstack/glance-default-external-api-0" Nov 25 10:07:56 crc kubenswrapper[4769]: I1125 10:07:56.230955 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 10:07:56 crc kubenswrapper[4769]: I1125 10:07:56.252955 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="390e02db-45a5-4498-92a6-edde620d04b8" path="/var/lib/kubelet/pods/390e02db-45a5-4498-92a6-edde620d04b8/volumes" Nov 25 10:07:56 crc kubenswrapper[4769]: I1125 10:07:56.253808 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4ea324d5-69e1-4f0b-982f-15b0de3ec539" path="/var/lib/kubelet/pods/4ea324d5-69e1-4f0b-982f-15b0de3ec539/volumes" Nov 25 10:07:56 crc kubenswrapper[4769]: I1125 10:07:56.551413 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0fdc6e58-5fd1-4801-b734-a47fc74c7bac","Type":"ContainerStarted","Data":"5ebff034e341b2b0db68624a7eaa8fb926a63d2a778a7d2303e5e04690095a1f"} Nov 25 10:07:56 crc kubenswrapper[4769]: I1125 10:07:56.575620 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 10:07:56 crc kubenswrapper[4769]: I1125 10:07:56.575745 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"4ea2eb46-81be-409a-9f6b-8775f5458372","Type":"ContainerDied","Data":"bf5c5d10ced4a4c692efcd4379f2eee3b2c972c373664dfba43854f9e4f33372"} Nov 25 10:07:56 crc kubenswrapper[4769]: I1125 10:07:56.575811 4769 scope.go:117] "RemoveContainer" containerID="324d6d2418fce58a565ec648cf71a995ff8b4854821799494437c7cf85e2042c" Nov 25 10:07:56 crc kubenswrapper[4769]: I1125 10:07:56.613152 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 10:07:56 crc kubenswrapper[4769]: I1125 10:07:56.639086 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 10:07:56 crc kubenswrapper[4769]: I1125 10:07:56.647423 4769 scope.go:117] "RemoveContainer" containerID="5c0bff317d73f056f37cf0fcc331317bdf7c0cfb69ae4a456b42915a028ac8b0" Nov 25 10:07:56 crc kubenswrapper[4769]: I1125 10:07:56.650517 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 10:07:56 crc kubenswrapper[4769]: I1125 10:07:56.653433 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 10:07:56 crc kubenswrapper[4769]: I1125 10:07:56.657425 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 25 10:07:56 crc kubenswrapper[4769]: I1125 10:07:56.657746 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 25 10:07:56 crc kubenswrapper[4769]: I1125 10:07:56.666163 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 10:07:56 crc kubenswrapper[4769]: I1125 10:07:56.755304 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f01554f0-8651-4121-99d2-65725f73ad2b-logs\") pod \"glance-default-internal-api-0\" (UID: \"f01554f0-8651-4121-99d2-65725f73ad2b\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:07:56 crc kubenswrapper[4769]: I1125 10:07:56.755560 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f01554f0-8651-4121-99d2-65725f73ad2b-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"f01554f0-8651-4121-99d2-65725f73ad2b\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:07:56 crc kubenswrapper[4769]: I1125 10:07:56.755713 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f01554f0-8651-4121-99d2-65725f73ad2b-scripts\") pod \"glance-default-internal-api-0\" (UID: \"f01554f0-8651-4121-99d2-65725f73ad2b\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:07:56 crc kubenswrapper[4769]: I1125 10:07:56.755789 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f01554f0-8651-4121-99d2-65725f73ad2b-config-data\") pod \"glance-default-internal-api-0\" (UID: \"f01554f0-8651-4121-99d2-65725f73ad2b\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:07:56 crc kubenswrapper[4769]: I1125 10:07:56.756087 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f01554f0-8651-4121-99d2-65725f73ad2b-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"f01554f0-8651-4121-99d2-65725f73ad2b\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:07:56 crc kubenswrapper[4769]: I1125 10:07:56.756191 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f01554f0-8651-4121-99d2-65725f73ad2b-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"f01554f0-8651-4121-99d2-65725f73ad2b\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:07:56 crc kubenswrapper[4769]: I1125 10:07:56.756359 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g564k\" (UniqueName: \"kubernetes.io/projected/f01554f0-8651-4121-99d2-65725f73ad2b-kube-api-access-g564k\") pod \"glance-default-internal-api-0\" (UID: \"f01554f0-8651-4121-99d2-65725f73ad2b\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:07:56 crc kubenswrapper[4769]: I1125 10:07:56.756390 4769 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"f01554f0-8651-4121-99d2-65725f73ad2b\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:07:56 crc kubenswrapper[4769]: I1125 10:07:56.861812 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f01554f0-8651-4121-99d2-65725f73ad2b-scripts\") pod \"glance-default-internal-api-0\" (UID: \"f01554f0-8651-4121-99d2-65725f73ad2b\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:07:56 crc kubenswrapper[4769]: I1125 10:07:56.862206 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f01554f0-8651-4121-99d2-65725f73ad2b-config-data\") pod \"glance-default-internal-api-0\" (UID: \"f01554f0-8651-4121-99d2-65725f73ad2b\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:07:56 crc kubenswrapper[4769]: I1125 10:07:56.862269 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f01554f0-8651-4121-99d2-65725f73ad2b-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"f01554f0-8651-4121-99d2-65725f73ad2b\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:07:56 crc kubenswrapper[4769]: I1125 10:07:56.862301 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f01554f0-8651-4121-99d2-65725f73ad2b-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"f01554f0-8651-4121-99d2-65725f73ad2b\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:07:56 crc kubenswrapper[4769]: I1125 10:07:56.862347 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g564k\" (UniqueName: \"kubernetes.io/projected/f01554f0-8651-4121-99d2-65725f73ad2b-kube-api-access-g564k\") pod \"glance-default-internal-api-0\" (UID: \"f01554f0-8651-4121-99d2-65725f73ad2b\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:07:56 crc kubenswrapper[4769]: I1125 10:07:56.862367 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"f01554f0-8651-4121-99d2-65725f73ad2b\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:07:56 crc kubenswrapper[4769]: I1125 10:07:56.862439 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f01554f0-8651-4121-99d2-65725f73ad2b-logs\") pod \"glance-default-internal-api-0\" (UID: \"f01554f0-8651-4121-99d2-65725f73ad2b\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:07:56 crc kubenswrapper[4769]: I1125 10:07:56.862484 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f01554f0-8651-4121-99d2-65725f73ad2b-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"f01554f0-8651-4121-99d2-65725f73ad2b\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:07:56 crc kubenswrapper[4769]: I1125 10:07:56.862690 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: 
\"kubernetes.io/empty-dir/f01554f0-8651-4121-99d2-65725f73ad2b-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"f01554f0-8651-4121-99d2-65725f73ad2b\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:07:56 crc kubenswrapper[4769]: I1125 10:07:56.862939 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f01554f0-8651-4121-99d2-65725f73ad2b-logs\") pod \"glance-default-internal-api-0\" (UID: \"f01554f0-8651-4121-99d2-65725f73ad2b\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:07:56 crc kubenswrapper[4769]: I1125 10:07:56.862984 4769 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"f01554f0-8651-4121-99d2-65725f73ad2b\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-internal-api-0" Nov 25 10:07:56 crc kubenswrapper[4769]: I1125 10:07:56.869573 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f01554f0-8651-4121-99d2-65725f73ad2b-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"f01554f0-8651-4121-99d2-65725f73ad2b\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:07:56 crc kubenswrapper[4769]: I1125 10:07:56.871422 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f01554f0-8651-4121-99d2-65725f73ad2b-scripts\") pod \"glance-default-internal-api-0\" (UID: \"f01554f0-8651-4121-99d2-65725f73ad2b\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:07:56 crc kubenswrapper[4769]: I1125 10:07:56.873070 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f01554f0-8651-4121-99d2-65725f73ad2b-config-data\") pod \"glance-default-internal-api-0\" (UID: \"f01554f0-8651-4121-99d2-65725f73ad2b\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:07:56 crc kubenswrapper[4769]: I1125 10:07:56.884072 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f01554f0-8651-4121-99d2-65725f73ad2b-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"f01554f0-8651-4121-99d2-65725f73ad2b\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:07:56 crc kubenswrapper[4769]: I1125 10:07:56.888393 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g564k\" (UniqueName: \"kubernetes.io/projected/f01554f0-8651-4121-99d2-65725f73ad2b-kube-api-access-g564k\") pod \"glance-default-internal-api-0\" (UID: \"f01554f0-8651-4121-99d2-65725f73ad2b\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:07:56 crc kubenswrapper[4769]: I1125 10:07:56.913231 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"f01554f0-8651-4121-99d2-65725f73ad2b\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:07:56 crc kubenswrapper[4769]: I1125 10:07:56.929718 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 10:07:57 crc kubenswrapper[4769]: I1125 10:07:57.001788 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 10:07:57 crc kubenswrapper[4769]: I1125 10:07:57.612456 4769 generic.go:334] "Generic (PLEG): container finished" podID="922fd841-fddc-4178-ba91-d3b0d45826a8" containerID="d74f157ebaac55cacb7b0a73bf061a43eb3e5715ac2d9e69c7ae53710777c361" exitCode=0 Nov 25 10:07:57 crc kubenswrapper[4769]: I1125 10:07:57.612520 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xng9l" event={"ID":"922fd841-fddc-4178-ba91-d3b0d45826a8","Type":"ContainerDied","Data":"d74f157ebaac55cacb7b0a73bf061a43eb3e5715ac2d9e69c7ae53710777c361"} Nov 25 10:07:57 crc kubenswrapper[4769]: I1125 10:07:57.635993 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"e4dc076f-e68c-4c7e-ae16-f8576de44f48","Type":"ContainerStarted","Data":"ceb08a59a9ffb0a1fd559a652839eccc6174f1fc7a33857232d400a4182e6ea7"} Nov 25 10:07:57 crc kubenswrapper[4769]: I1125 10:07:57.637551 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0fdc6e58-5fd1-4801-b734-a47fc74c7bac","Type":"ContainerStarted","Data":"4897b8c5e6af0a24e9d756676a48742fc4d7894b7cde97f9333dc30731e5d13f"} Nov 25 10:07:57 crc kubenswrapper[4769]: I1125 10:07:57.654769 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 10:07:57 crc kubenswrapper[4769]: W1125 10:07:57.670431 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf01554f0_8651_4121_99d2_65725f73ad2b.slice/crio-dbd08a4b6872a615f4de71bc977084abd373e86c43cc0b76ac727fc59b89ae45 WatchSource:0}: Error finding container dbd08a4b6872a615f4de71bc977084abd373e86c43cc0b76ac727fc59b89ae45: Status 404 returned error can't find the container with id dbd08a4b6872a615f4de71bc977084abd373e86c43cc0b76ac727fc59b89ae45 Nov 25 10:07:58 crc kubenswrapper[4769]: I1125 10:07:58.290080 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4ea2eb46-81be-409a-9f6b-8775f5458372" path="/var/lib/kubelet/pods/4ea2eb46-81be-409a-9f6b-8775f5458372/volumes" Nov 25 10:07:58 crc kubenswrapper[4769]: I1125 10:07:58.678714 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xng9l" event={"ID":"922fd841-fddc-4178-ba91-d3b0d45826a8","Type":"ContainerStarted","Data":"b82689e7da0d42083d8e0103ba41079a6eb14a7544c2b45f3ea55fbaa095df30"} Nov 25 10:07:58 crc kubenswrapper[4769]: I1125 10:07:58.708253 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-xng9l" podStartSLOduration=2.949871229 podStartE2EDuration="7.70822628s" podCreationTimestamp="2025-11-25 10:07:51 +0000 UTC" firstStartedPulling="2025-11-25 10:07:53.423207877 +0000 UTC m=+1422.008180190" lastFinishedPulling="2025-11-25 10:07:58.181562928 +0000 UTC m=+1426.766535241" observedRunningTime="2025-11-25 10:07:58.707233604 +0000 UTC m=+1427.292205917" watchObservedRunningTime="2025-11-25 10:07:58.70822628 +0000 UTC m=+1427.293198583" Nov 25 10:07:58 crc kubenswrapper[4769]: I1125 10:07:58.731722 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"e4dc076f-e68c-4c7e-ae16-f8576de44f48","Type":"ContainerStarted","Data":"b3bbef4f02f994b3cf9d9295db8fb129734efbf658cbae3ad2f52091558e956d"} Nov 25 10:07:58 crc kubenswrapper[4769]: I1125 10:07:58.755675 
4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f01554f0-8651-4121-99d2-65725f73ad2b","Type":"ContainerStarted","Data":"8b5f4d65d99f6f5935b033fe3317208dfc0339dbd2051297964899422346f287"} Nov 25 10:07:58 crc kubenswrapper[4769]: I1125 10:07:58.755744 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f01554f0-8651-4121-99d2-65725f73ad2b","Type":"ContainerStarted","Data":"dbd08a4b6872a615f4de71bc977084abd373e86c43cc0b76ac727fc59b89ae45"} Nov 25 10:07:58 crc kubenswrapper[4769]: I1125 10:07:58.795205 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0fdc6e58-5fd1-4801-b734-a47fc74c7bac","Type":"ContainerStarted","Data":"15a262568e1f746af5762362cb62481f8f508ac12cd1c4d4967ef6253d19683f"} Nov 25 10:07:59 crc kubenswrapper[4769]: I1125 10:07:59.828530 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"e4dc076f-e68c-4c7e-ae16-f8576de44f48","Type":"ContainerStarted","Data":"7c9932b6ae20caf2d8caa1b029ed8ec4cbdaa44e36a6aaad546b059cc41422ef"} Nov 25 10:07:59 crc kubenswrapper[4769]: I1125 10:07:59.841276 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f01554f0-8651-4121-99d2-65725f73ad2b","Type":"ContainerStarted","Data":"e8c25b1991a15e918bd54dbf8af3d9b300f01fe4bf8d012911a645e509354b0b"} Nov 25 10:07:59 crc kubenswrapper[4769]: I1125 10:07:59.848046 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-cfnapi-66b978bc64-9578t" podUID="344a181f-8505-44bd-a107-481eb1e382da" containerName="heat-cfnapi" probeResult="failure" output="Get \"http://10.217.0.211:8000/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 10:07:59 crc kubenswrapper[4769]: I1125 10:07:59.853404 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0fdc6e58-5fd1-4801-b734-a47fc74c7bac","Type":"ContainerStarted","Data":"126ea340f0d7a0a4eff280595a8d09c3af7c163104cf6f7a8f1cc73b26b8dc0d"} Nov 25 10:07:59 crc kubenswrapper[4769]: I1125 10:07:59.877978 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=4.877847961 podStartE2EDuration="4.877847961s" podCreationTimestamp="2025-11-25 10:07:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:07:59.855604611 +0000 UTC m=+1428.440576924" watchObservedRunningTime="2025-11-25 10:07:59.877847961 +0000 UTC m=+1428.462820274" Nov 25 10:07:59 crc kubenswrapper[4769]: I1125 10:07:59.899883 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.899856766 podStartE2EDuration="3.899856766s" podCreationTimestamp="2025-11-25 10:07:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:07:59.885344354 +0000 UTC m=+1428.470316667" watchObservedRunningTime="2025-11-25 10:07:59.899856766 +0000 UTC m=+1428.484829079" Nov 25 10:08:00 crc kubenswrapper[4769]: I1125 10:08:00.648840 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-cfnapi-546cbc7f88-ncbrw" Nov 25 10:08:00 crc kubenswrapper[4769]: 
I1125 10:08:00.714925 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-78c5b49c8c-v7f8v"] Nov 25 10:08:00 crc kubenswrapper[4769]: I1125 10:08:00.897833 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0fdc6e58-5fd1-4801-b734-a47fc74c7bac","Type":"ContainerStarted","Data":"03128a541c8c3ac942e56adf5b7a1225b597d4361e13a587d9ea9ddd02b16a5d"} Nov 25 10:08:00 crc kubenswrapper[4769]: I1125 10:08:00.898624 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 10:08:00 crc kubenswrapper[4769]: I1125 10:08:00.952873 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.324544063 podStartE2EDuration="6.952850907s" podCreationTimestamp="2025-11-25 10:07:54 +0000 UTC" firstStartedPulling="2025-11-25 10:07:55.478838687 +0000 UTC m=+1424.063811000" lastFinishedPulling="2025-11-25 10:08:00.107145531 +0000 UTC m=+1428.692117844" observedRunningTime="2025-11-25 10:08:00.931555781 +0000 UTC m=+1429.516528094" watchObservedRunningTime="2025-11-25 10:08:00.952850907 +0000 UTC m=+1429.537823220" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.045732 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-api-58b7d79fd9-47jn4" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.159154 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-59d8894fc7-94vz2"] Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.411438 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-9s2mb"] Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.416166 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-9s2mb" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.421841 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-78c5b49c8c-v7f8v" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.435466 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-9s2mb"] Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.502990 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-bvfxk"] Nov 25 10:08:01 crc kubenswrapper[4769]: E1125 10:08:01.506558 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16fdfb26-f9e8-40be-88fd-341faa115238" containerName="heat-cfnapi" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.506600 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="16fdfb26-f9e8-40be-88fd-341faa115238" containerName="heat-cfnapi" Nov 25 10:08:01 crc kubenswrapper[4769]: E1125 10:08:01.506631 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16fdfb26-f9e8-40be-88fd-341faa115238" containerName="heat-cfnapi" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.506642 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="16fdfb26-f9e8-40be-88fd-341faa115238" containerName="heat-cfnapi" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.508275 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="16fdfb26-f9e8-40be-88fd-341faa115238" containerName="heat-cfnapi" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.508314 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="16fdfb26-f9e8-40be-88fd-341faa115238" containerName="heat-cfnapi" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.509774 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-bvfxk" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.559022 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-8f09-account-create-gl77m"] Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.561375 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-8f09-account-create-gl77m" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.565022 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.592623 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-bvfxk"] Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.596817 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/16fdfb26-f9e8-40be-88fd-341faa115238-config-data-custom\") pod \"16fdfb26-f9e8-40be-88fd-341faa115238\" (UID: \"16fdfb26-f9e8-40be-88fd-341faa115238\") " Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.596885 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xdqxf\" (UniqueName: \"kubernetes.io/projected/16fdfb26-f9e8-40be-88fd-341faa115238-kube-api-access-xdqxf\") pod \"16fdfb26-f9e8-40be-88fd-341faa115238\" (UID: \"16fdfb26-f9e8-40be-88fd-341faa115238\") " Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.597274 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16fdfb26-f9e8-40be-88fd-341faa115238-config-data\") pod \"16fdfb26-f9e8-40be-88fd-341faa115238\" (UID: \"16fdfb26-f9e8-40be-88fd-341faa115238\") " Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.597345 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16fdfb26-f9e8-40be-88fd-341faa115238-combined-ca-bundle\") pod \"16fdfb26-f9e8-40be-88fd-341faa115238\" (UID: \"16fdfb26-f9e8-40be-88fd-341faa115238\") " Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.597736 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6s6pr\" (UniqueName: \"kubernetes.io/projected/94da5c0c-3b95-4513-9fcb-6ee861294428-kube-api-access-6s6pr\") pod \"nova-api-db-create-9s2mb\" (UID: \"94da5c0c-3b95-4513-9fcb-6ee861294428\") " pod="openstack/nova-api-db-create-9s2mb" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.597954 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/94da5c0c-3b95-4513-9fcb-6ee861294428-operator-scripts\") pod \"nova-api-db-create-9s2mb\" (UID: \"94da5c0c-3b95-4513-9fcb-6ee861294428\") " pod="openstack/nova-api-db-create-9s2mb" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.606037 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16fdfb26-f9e8-40be-88fd-341faa115238-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "16fdfb26-f9e8-40be-88fd-341faa115238" (UID: "16fdfb26-f9e8-40be-88fd-341faa115238"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.606588 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/16fdfb26-f9e8-40be-88fd-341faa115238-kube-api-access-xdqxf" (OuterVolumeSpecName: "kube-api-access-xdqxf") pod "16fdfb26-f9e8-40be-88fd-341faa115238" (UID: "16fdfb26-f9e8-40be-88fd-341faa115238"). InnerVolumeSpecName "kube-api-access-xdqxf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.623750 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-8f09-account-create-gl77m"] Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.650847 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16fdfb26-f9e8-40be-88fd-341faa115238-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "16fdfb26-f9e8-40be-88fd-341faa115238" (UID: "16fdfb26-f9e8-40be-88fd-341faa115238"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.711090 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/94da5c0c-3b95-4513-9fcb-6ee861294428-operator-scripts\") pod \"nova-api-db-create-9s2mb\" (UID: \"94da5c0c-3b95-4513-9fcb-6ee861294428\") " pod="openstack/nova-api-db-create-9s2mb" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.711265 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6s6pr\" (UniqueName: \"kubernetes.io/projected/94da5c0c-3b95-4513-9fcb-6ee861294428-kube-api-access-6s6pr\") pod \"nova-api-db-create-9s2mb\" (UID: \"94da5c0c-3b95-4513-9fcb-6ee861294428\") " pod="openstack/nova-api-db-create-9s2mb" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.711460 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-74zqz\" (UniqueName: \"kubernetes.io/projected/219e181e-2dbc-410e-aa4e-e9c843fb2aa9-kube-api-access-74zqz\") pod \"nova-cell0-db-create-bvfxk\" (UID: \"219e181e-2dbc-410e-aa4e-e9c843fb2aa9\") " pod="openstack/nova-cell0-db-create-bvfxk" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.711523 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/219e181e-2dbc-410e-aa4e-e9c843fb2aa9-operator-scripts\") pod \"nova-cell0-db-create-bvfxk\" (UID: \"219e181e-2dbc-410e-aa4e-e9c843fb2aa9\") " pod="openstack/nova-cell0-db-create-bvfxk" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.711567 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wm95f\" (UniqueName: \"kubernetes.io/projected/a43c3555-7a5d-4824-a913-98e74014c48c-kube-api-access-wm95f\") pod \"nova-api-8f09-account-create-gl77m\" (UID: \"a43c3555-7a5d-4824-a913-98e74014c48c\") " pod="openstack/nova-api-8f09-account-create-gl77m" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.711643 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a43c3555-7a5d-4824-a913-98e74014c48c-operator-scripts\") pod \"nova-api-8f09-account-create-gl77m\" (UID: \"a43c3555-7a5d-4824-a913-98e74014c48c\") " pod="openstack/nova-api-8f09-account-create-gl77m" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.711881 4769 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/16fdfb26-f9e8-40be-88fd-341faa115238-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.711896 4769 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-xdqxf\" (UniqueName: \"kubernetes.io/projected/16fdfb26-f9e8-40be-88fd-341faa115238-kube-api-access-xdqxf\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.711912 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16fdfb26-f9e8-40be-88fd-341faa115238-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.713379 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/94da5c0c-3b95-4513-9fcb-6ee861294428-operator-scripts\") pod \"nova-api-db-create-9s2mb\" (UID: \"94da5c0c-3b95-4513-9fcb-6ee861294428\") " pod="openstack/nova-api-db-create-9s2mb" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.761477 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6s6pr\" (UniqueName: \"kubernetes.io/projected/94da5c0c-3b95-4513-9fcb-6ee861294428-kube-api-access-6s6pr\") pod \"nova-api-db-create-9s2mb\" (UID: \"94da5c0c-3b95-4513-9fcb-6ee861294428\") " pod="openstack/nova-api-db-create-9s2mb" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.779449 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-72k77"] Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.783493 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-72k77" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.787871 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-9s2mb" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.789731 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-f2a2-account-create-q65xr"] Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.804669 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-f2a2-account-create-q65xr" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.808686 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.811388 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-72k77"] Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.815146 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-74zqz\" (UniqueName: \"kubernetes.io/projected/219e181e-2dbc-410e-aa4e-e9c843fb2aa9-kube-api-access-74zqz\") pod \"nova-cell0-db-create-bvfxk\" (UID: \"219e181e-2dbc-410e-aa4e-e9c843fb2aa9\") " pod="openstack/nova-cell0-db-create-bvfxk" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.815193 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/219e181e-2dbc-410e-aa4e-e9c843fb2aa9-operator-scripts\") pod \"nova-cell0-db-create-bvfxk\" (UID: \"219e181e-2dbc-410e-aa4e-e9c843fb2aa9\") " pod="openstack/nova-cell0-db-create-bvfxk" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.815228 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wm95f\" (UniqueName: \"kubernetes.io/projected/a43c3555-7a5d-4824-a913-98e74014c48c-kube-api-access-wm95f\") pod \"nova-api-8f09-account-create-gl77m\" (UID: \"a43c3555-7a5d-4824-a913-98e74014c48c\") " pod="openstack/nova-api-8f09-account-create-gl77m" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.815276 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a43c3555-7a5d-4824-a913-98e74014c48c-operator-scripts\") pod \"nova-api-8f09-account-create-gl77m\" (UID: \"a43c3555-7a5d-4824-a913-98e74014c48c\") " pod="openstack/nova-api-8f09-account-create-gl77m" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.816300 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a43c3555-7a5d-4824-a913-98e74014c48c-operator-scripts\") pod \"nova-api-8f09-account-create-gl77m\" (UID: \"a43c3555-7a5d-4824-a913-98e74014c48c\") " pod="openstack/nova-api-8f09-account-create-gl77m" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.816991 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/219e181e-2dbc-410e-aa4e-e9c843fb2aa9-operator-scripts\") pod \"nova-cell0-db-create-bvfxk\" (UID: \"219e181e-2dbc-410e-aa4e-e9c843fb2aa9\") " pod="openstack/nova-cell0-db-create-bvfxk" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.834444 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-f2a2-account-create-q65xr"] Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.842220 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wm95f\" (UniqueName: \"kubernetes.io/projected/a43c3555-7a5d-4824-a913-98e74014c48c-kube-api-access-wm95f\") pod \"nova-api-8f09-account-create-gl77m\" (UID: \"a43c3555-7a5d-4824-a913-98e74014c48c\") " pod="openstack/nova-api-8f09-account-create-gl77m" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.842747 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-74zqz\" (UniqueName: 
\"kubernetes.io/projected/219e181e-2dbc-410e-aa4e-e9c843fb2aa9-kube-api-access-74zqz\") pod \"nova-cell0-db-create-bvfxk\" (UID: \"219e181e-2dbc-410e-aa4e-e9c843fb2aa9\") " pod="openstack/nova-cell0-db-create-bvfxk" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.847521 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-bvfxk" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.849973 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16fdfb26-f9e8-40be-88fd-341faa115238-config-data" (OuterVolumeSpecName: "config-data") pod "16fdfb26-f9e8-40be-88fd-341faa115238" (UID: "16fdfb26-f9e8-40be-88fd-341faa115238"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.890400 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-59d8894fc7-94vz2" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.894498 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-8f09-account-create-gl77m" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.915370 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-xng9l" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.923911 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-xng9l" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.931467 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e0335a31-5586-412d-b8fd-740763f60392-operator-scripts\") pod \"nova-cell1-db-create-72k77\" (UID: \"e0335a31-5586-412d-b8fd-740763f60392\") " pod="openstack/nova-cell1-db-create-72k77" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.931982 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c5017f20-1e2f-44e6-9b68-f8b64ca42cc2-operator-scripts\") pod \"nova-cell0-f2a2-account-create-q65xr\" (UID: \"c5017f20-1e2f-44e6-9b68-f8b64ca42cc2\") " pod="openstack/nova-cell0-f2a2-account-create-q65xr" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.932109 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-66dsv\" (UniqueName: \"kubernetes.io/projected/e0335a31-5586-412d-b8fd-740763f60392-kube-api-access-66dsv\") pod \"nova-cell1-db-create-72k77\" (UID: \"e0335a31-5586-412d-b8fd-740763f60392\") " pod="openstack/nova-cell1-db-create-72k77" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.932155 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sm6xv\" (UniqueName: \"kubernetes.io/projected/c5017f20-1e2f-44e6-9b68-f8b64ca42cc2-kube-api-access-sm6xv\") pod \"nova-cell0-f2a2-account-create-q65xr\" (UID: \"c5017f20-1e2f-44e6-9b68-f8b64ca42cc2\") " pod="openstack/nova-cell0-f2a2-account-create-q65xr" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.932538 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16fdfb26-f9e8-40be-88fd-341faa115238-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:01 crc 
kubenswrapper[4769]: I1125 10:08:01.932615 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-b9e5-account-create-8s7mq"] Nov 25 10:08:01 crc kubenswrapper[4769]: E1125 10:08:01.941298 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="96b52ba9-e2dc-4a75-8001-b2ce075634e7" containerName="heat-api" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.941345 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="96b52ba9-e2dc-4a75-8001-b2ce075634e7" containerName="heat-api" Nov 25 10:08:01 crc kubenswrapper[4769]: E1125 10:08:01.941385 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="96b52ba9-e2dc-4a75-8001-b2ce075634e7" containerName="heat-api" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.941395 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="96b52ba9-e2dc-4a75-8001-b2ce075634e7" containerName="heat-api" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.941931 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="96b52ba9-e2dc-4a75-8001-b2ce075634e7" containerName="heat-api" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.943032 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-b9e5-account-create-8s7mq" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.955360 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Nov 25 10:08:01 crc kubenswrapper[4769]: I1125 10:08:01.969860 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-b9e5-account-create-8s7mq"] Nov 25 10:08:02 crc kubenswrapper[4769]: I1125 10:08:02.004337 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-78c5b49c8c-v7f8v" Nov 25 10:08:02 crc kubenswrapper[4769]: I1125 10:08:02.004361 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-78c5b49c8c-v7f8v" event={"ID":"16fdfb26-f9e8-40be-88fd-341faa115238","Type":"ContainerDied","Data":"fe69187cb730e12878de9557c7ad5443b01695435c8a8d1359d65be35cb1b2e9"} Nov 25 10:08:02 crc kubenswrapper[4769]: I1125 10:08:02.004406 4769 scope.go:117] "RemoveContainer" containerID="9177dbd875a9c38ad4ea9bdbd80eb53eecb6c8629e70060bc18969722cd5be23" Nov 25 10:08:02 crc kubenswrapper[4769]: I1125 10:08:02.038008 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5hspm\" (UniqueName: \"kubernetes.io/projected/96b52ba9-e2dc-4a75-8001-b2ce075634e7-kube-api-access-5hspm\") pod \"96b52ba9-e2dc-4a75-8001-b2ce075634e7\" (UID: \"96b52ba9-e2dc-4a75-8001-b2ce075634e7\") " Nov 25 10:08:02 crc kubenswrapper[4769]: I1125 10:08:02.038222 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96b52ba9-e2dc-4a75-8001-b2ce075634e7-combined-ca-bundle\") pod \"96b52ba9-e2dc-4a75-8001-b2ce075634e7\" (UID: \"96b52ba9-e2dc-4a75-8001-b2ce075634e7\") " Nov 25 10:08:02 crc kubenswrapper[4769]: I1125 10:08:02.038299 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/96b52ba9-e2dc-4a75-8001-b2ce075634e7-config-data\") pod \"96b52ba9-e2dc-4a75-8001-b2ce075634e7\" (UID: \"96b52ba9-e2dc-4a75-8001-b2ce075634e7\") " Nov 25 10:08:02 crc kubenswrapper[4769]: I1125 10:08:02.038489 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"config-data-custom\" (UniqueName: \"kubernetes.io/secret/96b52ba9-e2dc-4a75-8001-b2ce075634e7-config-data-custom\") pod \"96b52ba9-e2dc-4a75-8001-b2ce075634e7\" (UID: \"96b52ba9-e2dc-4a75-8001-b2ce075634e7\") " Nov 25 10:08:02 crc kubenswrapper[4769]: I1125 10:08:02.038857 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e0335a31-5586-412d-b8fd-740763f60392-operator-scripts\") pod \"nova-cell1-db-create-72k77\" (UID: \"e0335a31-5586-412d-b8fd-740763f60392\") " pod="openstack/nova-cell1-db-create-72k77" Nov 25 10:08:02 crc kubenswrapper[4769]: I1125 10:08:02.039045 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/352ab718-d89c-430d-a7ca-a7ad712d63d6-operator-scripts\") pod \"nova-cell1-b9e5-account-create-8s7mq\" (UID: \"352ab718-d89c-430d-a7ca-a7ad712d63d6\") " pod="openstack/nova-cell1-b9e5-account-create-8s7mq" Nov 25 10:08:02 crc kubenswrapper[4769]: I1125 10:08:02.039082 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hgx65\" (UniqueName: \"kubernetes.io/projected/352ab718-d89c-430d-a7ca-a7ad712d63d6-kube-api-access-hgx65\") pod \"nova-cell1-b9e5-account-create-8s7mq\" (UID: \"352ab718-d89c-430d-a7ca-a7ad712d63d6\") " pod="openstack/nova-cell1-b9e5-account-create-8s7mq" Nov 25 10:08:02 crc kubenswrapper[4769]: I1125 10:08:02.039105 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c5017f20-1e2f-44e6-9b68-f8b64ca42cc2-operator-scripts\") pod \"nova-cell0-f2a2-account-create-q65xr\" (UID: \"c5017f20-1e2f-44e6-9b68-f8b64ca42cc2\") " pod="openstack/nova-cell0-f2a2-account-create-q65xr" Nov 25 10:08:02 crc kubenswrapper[4769]: I1125 10:08:02.039166 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-66dsv\" (UniqueName: \"kubernetes.io/projected/e0335a31-5586-412d-b8fd-740763f60392-kube-api-access-66dsv\") pod \"nova-cell1-db-create-72k77\" (UID: \"e0335a31-5586-412d-b8fd-740763f60392\") " pod="openstack/nova-cell1-db-create-72k77" Nov 25 10:08:02 crc kubenswrapper[4769]: I1125 10:08:02.039202 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sm6xv\" (UniqueName: \"kubernetes.io/projected/c5017f20-1e2f-44e6-9b68-f8b64ca42cc2-kube-api-access-sm6xv\") pod \"nova-cell0-f2a2-account-create-q65xr\" (UID: \"c5017f20-1e2f-44e6-9b68-f8b64ca42cc2\") " pod="openstack/nova-cell0-f2a2-account-create-q65xr" Nov 25 10:08:02 crc kubenswrapper[4769]: I1125 10:08:02.040806 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c5017f20-1e2f-44e6-9b68-f8b64ca42cc2-operator-scripts\") pod \"nova-cell0-f2a2-account-create-q65xr\" (UID: \"c5017f20-1e2f-44e6-9b68-f8b64ca42cc2\") " pod="openstack/nova-cell0-f2a2-account-create-q65xr" Nov 25 10:08:02 crc kubenswrapper[4769]: I1125 10:08:02.042236 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e0335a31-5586-412d-b8fd-740763f60392-operator-scripts\") pod \"nova-cell1-db-create-72k77\" (UID: \"e0335a31-5586-412d-b8fd-740763f60392\") " pod="openstack/nova-cell1-db-create-72k77" Nov 25 10:08:02 crc kubenswrapper[4769]: I1125 10:08:02.047270 4769 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-59d8894fc7-94vz2" Nov 25 10:08:02 crc kubenswrapper[4769]: I1125 10:08:02.047234 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-59d8894fc7-94vz2" event={"ID":"96b52ba9-e2dc-4a75-8001-b2ce075634e7","Type":"ContainerDied","Data":"83f31ab85fa4812ad43f7eabb8ee3e1df7905e7cb6bdfb0dd903d2fc94534a4d"} Nov 25 10:08:02 crc kubenswrapper[4769]: I1125 10:08:02.058530 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b52ba9-e2dc-4a75-8001-b2ce075634e7-kube-api-access-5hspm" (OuterVolumeSpecName: "kube-api-access-5hspm") pod "96b52ba9-e2dc-4a75-8001-b2ce075634e7" (UID: "96b52ba9-e2dc-4a75-8001-b2ce075634e7"). InnerVolumeSpecName "kube-api-access-5hspm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:08:02 crc kubenswrapper[4769]: I1125 10:08:02.061622 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-78c5b49c8c-v7f8v"] Nov 25 10:08:02 crc kubenswrapper[4769]: I1125 10:08:02.063598 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-66dsv\" (UniqueName: \"kubernetes.io/projected/e0335a31-5586-412d-b8fd-740763f60392-kube-api-access-66dsv\") pod \"nova-cell1-db-create-72k77\" (UID: \"e0335a31-5586-412d-b8fd-740763f60392\") " pod="openstack/nova-cell1-db-create-72k77" Nov 25 10:08:02 crc kubenswrapper[4769]: I1125 10:08:02.066997 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b52ba9-e2dc-4a75-8001-b2ce075634e7-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "96b52ba9-e2dc-4a75-8001-b2ce075634e7" (UID: "96b52ba9-e2dc-4a75-8001-b2ce075634e7"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:08:02 crc kubenswrapper[4769]: I1125 10:08:02.068483 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sm6xv\" (UniqueName: \"kubernetes.io/projected/c5017f20-1e2f-44e6-9b68-f8b64ca42cc2-kube-api-access-sm6xv\") pod \"nova-cell0-f2a2-account-create-q65xr\" (UID: \"c5017f20-1e2f-44e6-9b68-f8b64ca42cc2\") " pod="openstack/nova-cell0-f2a2-account-create-q65xr" Nov 25 10:08:02 crc kubenswrapper[4769]: I1125 10:08:02.082527 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-78c5b49c8c-v7f8v"] Nov 25 10:08:02 crc kubenswrapper[4769]: I1125 10:08:02.136770 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b52ba9-e2dc-4a75-8001-b2ce075634e7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "96b52ba9-e2dc-4a75-8001-b2ce075634e7" (UID: "96b52ba9-e2dc-4a75-8001-b2ce075634e7"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:08:02 crc kubenswrapper[4769]: I1125 10:08:02.143745 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/352ab718-d89c-430d-a7ca-a7ad712d63d6-operator-scripts\") pod \"nova-cell1-b9e5-account-create-8s7mq\" (UID: \"352ab718-d89c-430d-a7ca-a7ad712d63d6\") " pod="openstack/nova-cell1-b9e5-account-create-8s7mq" Nov 25 10:08:02 crc kubenswrapper[4769]: I1125 10:08:02.143801 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hgx65\" (UniqueName: \"kubernetes.io/projected/352ab718-d89c-430d-a7ca-a7ad712d63d6-kube-api-access-hgx65\") pod \"nova-cell1-b9e5-account-create-8s7mq\" (UID: \"352ab718-d89c-430d-a7ca-a7ad712d63d6\") " pod="openstack/nova-cell1-b9e5-account-create-8s7mq" Nov 25 10:08:02 crc kubenswrapper[4769]: I1125 10:08:02.144103 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96b52ba9-e2dc-4a75-8001-b2ce075634e7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:02 crc kubenswrapper[4769]: I1125 10:08:02.144132 4769 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/96b52ba9-e2dc-4a75-8001-b2ce075634e7-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:02 crc kubenswrapper[4769]: I1125 10:08:02.144142 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5hspm\" (UniqueName: \"kubernetes.io/projected/96b52ba9-e2dc-4a75-8001-b2ce075634e7-kube-api-access-5hspm\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:02 crc kubenswrapper[4769]: I1125 10:08:02.147600 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/352ab718-d89c-430d-a7ca-a7ad712d63d6-operator-scripts\") pod \"nova-cell1-b9e5-account-create-8s7mq\" (UID: \"352ab718-d89c-430d-a7ca-a7ad712d63d6\") " pod="openstack/nova-cell1-b9e5-account-create-8s7mq" Nov 25 10:08:02 crc kubenswrapper[4769]: I1125 10:08:02.156259 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-72k77" Nov 25 10:08:02 crc kubenswrapper[4769]: I1125 10:08:02.169350 4769 scope.go:117] "RemoveContainer" containerID="8093caaf525c4c96d62dc6649237a6df0b7d322ea1f4d46279bc805fa3c12362" Nov 25 10:08:02 crc kubenswrapper[4769]: I1125 10:08:02.170152 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hgx65\" (UniqueName: \"kubernetes.io/projected/352ab718-d89c-430d-a7ca-a7ad712d63d6-kube-api-access-hgx65\") pod \"nova-cell1-b9e5-account-create-8s7mq\" (UID: \"352ab718-d89c-430d-a7ca-a7ad712d63d6\") " pod="openstack/nova-cell1-b9e5-account-create-8s7mq" Nov 25 10:08:02 crc kubenswrapper[4769]: I1125 10:08:02.176302 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-f2a2-account-create-q65xr" Nov 25 10:08:02 crc kubenswrapper[4769]: I1125 10:08:02.189015 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b52ba9-e2dc-4a75-8001-b2ce075634e7-config-data" (OuterVolumeSpecName: "config-data") pod "96b52ba9-e2dc-4a75-8001-b2ce075634e7" (UID: "96b52ba9-e2dc-4a75-8001-b2ce075634e7"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:08:02 crc kubenswrapper[4769]: I1125 10:08:02.207341 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-xng9l" Nov 25 10:08:02 crc kubenswrapper[4769]: I1125 10:08:02.247596 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/96b52ba9-e2dc-4a75-8001-b2ce075634e7-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:02 crc kubenswrapper[4769]: I1125 10:08:02.277747 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="16fdfb26-f9e8-40be-88fd-341faa115238" path="/var/lib/kubelet/pods/16fdfb26-f9e8-40be-88fd-341faa115238/volumes" Nov 25 10:08:02 crc kubenswrapper[4769]: I1125 10:08:02.306002 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-b9e5-account-create-8s7mq" Nov 25 10:08:02 crc kubenswrapper[4769]: I1125 10:08:02.439565 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-9s2mb"] Nov 25 10:08:02 crc kubenswrapper[4769]: I1125 10:08:02.738080 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-bvfxk"] Nov 25 10:08:02 crc kubenswrapper[4769]: I1125 10:08:02.971511 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-59d8894fc7-94vz2"] Nov 25 10:08:03 crc kubenswrapper[4769]: I1125 10:08:03.030741 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-59d8894fc7-94vz2"] Nov 25 10:08:03 crc kubenswrapper[4769]: I1125 10:08:03.090733 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-9s2mb" event={"ID":"94da5c0c-3b95-4513-9fcb-6ee861294428","Type":"ContainerStarted","Data":"dbdb9cfd8ffb9c36c9056728ea53a2e42503dad4cdc848cf863027d61d07182c"} Nov 25 10:08:03 crc kubenswrapper[4769]: I1125 10:08:03.094269 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-bvfxk" event={"ID":"219e181e-2dbc-410e-aa4e-e9c843fb2aa9","Type":"ContainerStarted","Data":"0e1778a3b64d9ee8678afa45a4ac19d9567e8b3dc876bbf9126408bbd8ab2144"} Nov 25 10:08:03 crc kubenswrapper[4769]: I1125 10:08:03.238105 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-xng9l" Nov 25 10:08:03 crc kubenswrapper[4769]: I1125 10:08:03.300196 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-8f09-account-create-gl77m"] Nov 25 10:08:03 crc kubenswrapper[4769]: I1125 10:08:03.366551 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-xng9l"] Nov 25 10:08:03 crc kubenswrapper[4769]: I1125 10:08:03.454448 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-f2a2-account-create-q65xr"] Nov 25 10:08:03 crc kubenswrapper[4769]: I1125 10:08:03.571317 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-72k77"] Nov 25 10:08:03 crc kubenswrapper[4769]: I1125 10:08:03.748357 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-b9e5-account-create-8s7mq"] Nov 25 10:08:04 crc kubenswrapper[4769]: I1125 10:08:04.128994 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-b9e5-account-create-8s7mq" 
event={"ID":"352ab718-d89c-430d-a7ca-a7ad712d63d6","Type":"ContainerStarted","Data":"b68fd3a8fa6244da0431b838ee009927068922f3562f350aac98010c51fc228d"} Nov 25 10:08:04 crc kubenswrapper[4769]: I1125 10:08:04.129065 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-b9e5-account-create-8s7mq" event={"ID":"352ab718-d89c-430d-a7ca-a7ad712d63d6","Type":"ContainerStarted","Data":"45c846cbd03bf76db2d049a87e1eaf7adb6aa1ae84f77dca2dec204481f33818"} Nov 25 10:08:04 crc kubenswrapper[4769]: I1125 10:08:04.134908 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-72k77" event={"ID":"e0335a31-5586-412d-b8fd-740763f60392","Type":"ContainerStarted","Data":"ebda9b29ca57389ef6aa287e331a81143559af722bc39a5aff969a40f690ee8b"} Nov 25 10:08:04 crc kubenswrapper[4769]: I1125 10:08:04.134943 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-72k77" event={"ID":"e0335a31-5586-412d-b8fd-740763f60392","Type":"ContainerStarted","Data":"9117eaa71dab486cb037b0ef0093de83bce8d8bbdc4e7c9e7e1bb7988872d52a"} Nov 25 10:08:04 crc kubenswrapper[4769]: I1125 10:08:04.137580 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"be068e15-9a8b-472c-9a66-8ee06cf2491f","Type":"ContainerStarted","Data":"926daaca1272bd41fa80098835d5a7d0bb3d26e2ae984ea23b0d6bd5d17db92e"} Nov 25 10:08:04 crc kubenswrapper[4769]: I1125 10:08:04.143019 4769 generic.go:334] "Generic (PLEG): container finished" podID="219e181e-2dbc-410e-aa4e-e9c843fb2aa9" containerID="d67cfa2ad39238530eb0c1a19050ada8df3c3d5bf120b837d572fc813c1fb668" exitCode=0 Nov 25 10:08:04 crc kubenswrapper[4769]: I1125 10:08:04.143077 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-bvfxk" event={"ID":"219e181e-2dbc-410e-aa4e-e9c843fb2aa9","Type":"ContainerDied","Data":"d67cfa2ad39238530eb0c1a19050ada8df3c3d5bf120b837d572fc813c1fb668"} Nov 25 10:08:04 crc kubenswrapper[4769]: I1125 10:08:04.153826 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-b9e5-account-create-8s7mq" podStartSLOduration=3.153805968 podStartE2EDuration="3.153805968s" podCreationTimestamp="2025-11-25 10:08:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:08:04.152518815 +0000 UTC m=+1432.737491128" watchObservedRunningTime="2025-11-25 10:08:04.153805968 +0000 UTC m=+1432.738778281" Nov 25 10:08:04 crc kubenswrapper[4769]: I1125 10:08:04.158506 4769 generic.go:334] "Generic (PLEG): container finished" podID="94da5c0c-3b95-4513-9fcb-6ee861294428" containerID="31015313ac4f19d93a6c724e068a1d06894410010be4d3cfefa48daf7947dab8" exitCode=0 Nov 25 10:08:04 crc kubenswrapper[4769]: I1125 10:08:04.158595 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-9s2mb" event={"ID":"94da5c0c-3b95-4513-9fcb-6ee861294428","Type":"ContainerDied","Data":"31015313ac4f19d93a6c724e068a1d06894410010be4d3cfefa48daf7947dab8"} Nov 25 10:08:04 crc kubenswrapper[4769]: I1125 10:08:04.171347 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-f2a2-account-create-q65xr" event={"ID":"c5017f20-1e2f-44e6-9b68-f8b64ca42cc2","Type":"ContainerStarted","Data":"79f38451ad711cb20cd3e2e39cda77b9320d32f1dcf74b914118ff6d66f2f6cc"} Nov 25 10:08:04 crc kubenswrapper[4769]: I1125 10:08:04.171399 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-cell0-f2a2-account-create-q65xr" event={"ID":"c5017f20-1e2f-44e6-9b68-f8b64ca42cc2","Type":"ContainerStarted","Data":"43970803e65f497a4b1cfd8adea59c7cc41359da41c1b5015fd6e95d2ad6a95d"} Nov 25 10:08:04 crc kubenswrapper[4769]: I1125 10:08:04.175062 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-8f09-account-create-gl77m" event={"ID":"a43c3555-7a5d-4824-a913-98e74014c48c","Type":"ContainerStarted","Data":"a2ad5432078c4545b460768afec3bcf1b98236aeb7a0b03712a4a1f5a7eb3044"} Nov 25 10:08:04 crc kubenswrapper[4769]: I1125 10:08:04.175140 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-8f09-account-create-gl77m" event={"ID":"a43c3555-7a5d-4824-a913-98e74014c48c","Type":"ContainerStarted","Data":"2983a325f197b442fd65ade8176fea5c45dffcc42c1af3af2c0ec020e235f387"} Nov 25 10:08:04 crc kubenswrapper[4769]: I1125 10:08:04.186673 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=3.67760086 podStartE2EDuration="39.18665208s" podCreationTimestamp="2025-11-25 10:07:25 +0000 UTC" firstStartedPulling="2025-11-25 10:07:26.272844916 +0000 UTC m=+1394.857817229" lastFinishedPulling="2025-11-25 10:08:01.781896136 +0000 UTC m=+1430.366868449" observedRunningTime="2025-11-25 10:08:04.172142188 +0000 UTC m=+1432.757114501" watchObservedRunningTime="2025-11-25 10:08:04.18665208 +0000 UTC m=+1432.771624413" Nov 25 10:08:04 crc kubenswrapper[4769]: I1125 10:08:04.207996 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-db-create-72k77" podStartSLOduration=3.207953877 podStartE2EDuration="3.207953877s" podCreationTimestamp="2025-11-25 10:08:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:08:04.198425902 +0000 UTC m=+1432.783398215" watchObservedRunningTime="2025-11-25 10:08:04.207953877 +0000 UTC m=+1432.792926190" Nov 25 10:08:04 crc kubenswrapper[4769]: I1125 10:08:04.257647 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-8f09-account-create-gl77m" podStartSLOduration=3.25762381 podStartE2EDuration="3.25762381s" podCreationTimestamp="2025-11-25 10:08:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:08:04.256518242 +0000 UTC m=+1432.841490555" watchObservedRunningTime="2025-11-25 10:08:04.25762381 +0000 UTC m=+1432.842596123" Nov 25 10:08:04 crc kubenswrapper[4769]: I1125 10:08:04.282848 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b52ba9-e2dc-4a75-8001-b2ce075634e7" path="/var/lib/kubelet/pods/96b52ba9-e2dc-4a75-8001-b2ce075634e7/volumes" Nov 25 10:08:04 crc kubenswrapper[4769]: I1125 10:08:04.314815 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-f2a2-account-create-q65xr" podStartSLOduration=3.314794416 podStartE2EDuration="3.314794416s" podCreationTimestamp="2025-11-25 10:08:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:08:04.298907529 +0000 UTC m=+1432.883879842" watchObservedRunningTime="2025-11-25 10:08:04.314794416 +0000 UTC m=+1432.899766729" Nov 25 10:08:04 crc kubenswrapper[4769]: I1125 10:08:04.763655 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/ceilometer-0"] Nov 25 10:08:04 crc kubenswrapper[4769]: I1125 10:08:04.764539 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0fdc6e58-5fd1-4801-b734-a47fc74c7bac" containerName="ceilometer-central-agent" containerID="cri-o://4897b8c5e6af0a24e9d756676a48742fc4d7894b7cde97f9333dc30731e5d13f" gracePeriod=30 Nov 25 10:08:04 crc kubenswrapper[4769]: I1125 10:08:04.765425 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0fdc6e58-5fd1-4801-b734-a47fc74c7bac" containerName="proxy-httpd" containerID="cri-o://03128a541c8c3ac942e56adf5b7a1225b597d4361e13a587d9ea9ddd02b16a5d" gracePeriod=30 Nov 25 10:08:04 crc kubenswrapper[4769]: I1125 10:08:04.765502 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0fdc6e58-5fd1-4801-b734-a47fc74c7bac" containerName="sg-core" containerID="cri-o://126ea340f0d7a0a4eff280595a8d09c3af7c163104cf6f7a8f1cc73b26b8dc0d" gracePeriod=30 Nov 25 10:08:04 crc kubenswrapper[4769]: I1125 10:08:04.765555 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0fdc6e58-5fd1-4801-b734-a47fc74c7bac" containerName="ceilometer-notification-agent" containerID="cri-o://15a262568e1f746af5762362cb62481f8f508ac12cd1c4d4967ef6253d19683f" gracePeriod=30 Nov 25 10:08:05 crc kubenswrapper[4769]: I1125 10:08:05.192079 4769 generic.go:334] "Generic (PLEG): container finished" podID="c5017f20-1e2f-44e6-9b68-f8b64ca42cc2" containerID="79f38451ad711cb20cd3e2e39cda77b9320d32f1dcf74b914118ff6d66f2f6cc" exitCode=0 Nov 25 10:08:05 crc kubenswrapper[4769]: I1125 10:08:05.192223 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-f2a2-account-create-q65xr" event={"ID":"c5017f20-1e2f-44e6-9b68-f8b64ca42cc2","Type":"ContainerDied","Data":"79f38451ad711cb20cd3e2e39cda77b9320d32f1dcf74b914118ff6d66f2f6cc"} Nov 25 10:08:05 crc kubenswrapper[4769]: I1125 10:08:05.208377 4769 generic.go:334] "Generic (PLEG): container finished" podID="a43c3555-7a5d-4824-a913-98e74014c48c" containerID="a2ad5432078c4545b460768afec3bcf1b98236aeb7a0b03712a4a1f5a7eb3044" exitCode=0 Nov 25 10:08:05 crc kubenswrapper[4769]: I1125 10:08:05.208699 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-8f09-account-create-gl77m" event={"ID":"a43c3555-7a5d-4824-a913-98e74014c48c","Type":"ContainerDied","Data":"a2ad5432078c4545b460768afec3bcf1b98236aeb7a0b03712a4a1f5a7eb3044"} Nov 25 10:08:05 crc kubenswrapper[4769]: I1125 10:08:05.225356 4769 generic.go:334] "Generic (PLEG): container finished" podID="352ab718-d89c-430d-a7ca-a7ad712d63d6" containerID="b68fd3a8fa6244da0431b838ee009927068922f3562f350aac98010c51fc228d" exitCode=0 Nov 25 10:08:05 crc kubenswrapper[4769]: I1125 10:08:05.225441 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-b9e5-account-create-8s7mq" event={"ID":"352ab718-d89c-430d-a7ca-a7ad712d63d6","Type":"ContainerDied","Data":"b68fd3a8fa6244da0431b838ee009927068922f3562f350aac98010c51fc228d"} Nov 25 10:08:05 crc kubenswrapper[4769]: I1125 10:08:05.242840 4769 generic.go:334] "Generic (PLEG): container finished" podID="e0335a31-5586-412d-b8fd-740763f60392" containerID="ebda9b29ca57389ef6aa287e331a81143559af722bc39a5aff969a40f690ee8b" exitCode=0 Nov 25 10:08:05 crc kubenswrapper[4769]: I1125 10:08:05.242932 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-cell1-db-create-72k77" event={"ID":"e0335a31-5586-412d-b8fd-740763f60392","Type":"ContainerDied","Data":"ebda9b29ca57389ef6aa287e331a81143559af722bc39a5aff969a40f690ee8b"} Nov 25 10:08:05 crc kubenswrapper[4769]: I1125 10:08:05.265860 4769 generic.go:334] "Generic (PLEG): container finished" podID="0fdc6e58-5fd1-4801-b734-a47fc74c7bac" containerID="03128a541c8c3ac942e56adf5b7a1225b597d4361e13a587d9ea9ddd02b16a5d" exitCode=0 Nov 25 10:08:05 crc kubenswrapper[4769]: I1125 10:08:05.265894 4769 generic.go:334] "Generic (PLEG): container finished" podID="0fdc6e58-5fd1-4801-b734-a47fc74c7bac" containerID="126ea340f0d7a0a4eff280595a8d09c3af7c163104cf6f7a8f1cc73b26b8dc0d" exitCode=2 Nov 25 10:08:05 crc kubenswrapper[4769]: I1125 10:08:05.266110 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0fdc6e58-5fd1-4801-b734-a47fc74c7bac","Type":"ContainerDied","Data":"03128a541c8c3ac942e56adf5b7a1225b597d4361e13a587d9ea9ddd02b16a5d"} Nov 25 10:08:05 crc kubenswrapper[4769]: I1125 10:08:05.266137 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0fdc6e58-5fd1-4801-b734-a47fc74c7bac","Type":"ContainerDied","Data":"126ea340f0d7a0a4eff280595a8d09c3af7c163104cf6f7a8f1cc73b26b8dc0d"} Nov 25 10:08:05 crc kubenswrapper[4769]: I1125 10:08:05.266279 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-xng9l" podUID="922fd841-fddc-4178-ba91-d3b0d45826a8" containerName="registry-server" containerID="cri-o://b82689e7da0d42083d8e0103ba41079a6eb14a7544c2b45f3ea55fbaa095df30" gracePeriod=2 Nov 25 10:08:05 crc kubenswrapper[4769]: I1125 10:08:05.778450 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-9s2mb" Nov 25 10:08:05 crc kubenswrapper[4769]: I1125 10:08:05.905040 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-bvfxk" Nov 25 10:08:05 crc kubenswrapper[4769]: I1125 10:08:05.919873 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6s6pr\" (UniqueName: \"kubernetes.io/projected/94da5c0c-3b95-4513-9fcb-6ee861294428-kube-api-access-6s6pr\") pod \"94da5c0c-3b95-4513-9fcb-6ee861294428\" (UID: \"94da5c0c-3b95-4513-9fcb-6ee861294428\") " Nov 25 10:08:05 crc kubenswrapper[4769]: I1125 10:08:05.920249 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/94da5c0c-3b95-4513-9fcb-6ee861294428-operator-scripts\") pod \"94da5c0c-3b95-4513-9fcb-6ee861294428\" (UID: \"94da5c0c-3b95-4513-9fcb-6ee861294428\") " Nov 25 10:08:05 crc kubenswrapper[4769]: I1125 10:08:05.921454 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/94da5c0c-3b95-4513-9fcb-6ee861294428-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "94da5c0c-3b95-4513-9fcb-6ee861294428" (UID: "94da5c0c-3b95-4513-9fcb-6ee861294428"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:08:05 crc kubenswrapper[4769]: I1125 10:08:05.922032 4769 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/94da5c0c-3b95-4513-9fcb-6ee861294428-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:05 crc kubenswrapper[4769]: I1125 10:08:05.928728 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/94da5c0c-3b95-4513-9fcb-6ee861294428-kube-api-access-6s6pr" (OuterVolumeSpecName: "kube-api-access-6s6pr") pod "94da5c0c-3b95-4513-9fcb-6ee861294428" (UID: "94da5c0c-3b95-4513-9fcb-6ee861294428"). InnerVolumeSpecName "kube-api-access-6s6pr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.019121 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xng9l" Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.023975 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-74zqz\" (UniqueName: \"kubernetes.io/projected/219e181e-2dbc-410e-aa4e-e9c843fb2aa9-kube-api-access-74zqz\") pod \"219e181e-2dbc-410e-aa4e-e9c843fb2aa9\" (UID: \"219e181e-2dbc-410e-aa4e-e9c843fb2aa9\") " Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.024204 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/219e181e-2dbc-410e-aa4e-e9c843fb2aa9-operator-scripts\") pod \"219e181e-2dbc-410e-aa4e-e9c843fb2aa9\" (UID: \"219e181e-2dbc-410e-aa4e-e9c843fb2aa9\") " Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.024772 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/219e181e-2dbc-410e-aa4e-e9c843fb2aa9-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "219e181e-2dbc-410e-aa4e-e9c843fb2aa9" (UID: "219e181e-2dbc-410e-aa4e-e9c843fb2aa9"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.025456 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6s6pr\" (UniqueName: \"kubernetes.io/projected/94da5c0c-3b95-4513-9fcb-6ee861294428-kube-api-access-6s6pr\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.025490 4769 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/219e181e-2dbc-410e-aa4e-e9c843fb2aa9-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.037681 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/219e181e-2dbc-410e-aa4e-e9c843fb2aa9-kube-api-access-74zqz" (OuterVolumeSpecName: "kube-api-access-74zqz") pod "219e181e-2dbc-410e-aa4e-e9c843fb2aa9" (UID: "219e181e-2dbc-410e-aa4e-e9c843fb2aa9"). InnerVolumeSpecName "kube-api-access-74zqz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.128067 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/922fd841-fddc-4178-ba91-d3b0d45826a8-catalog-content\") pod \"922fd841-fddc-4178-ba91-d3b0d45826a8\" (UID: \"922fd841-fddc-4178-ba91-d3b0d45826a8\") " Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.128237 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/922fd841-fddc-4178-ba91-d3b0d45826a8-utilities\") pod \"922fd841-fddc-4178-ba91-d3b0d45826a8\" (UID: \"922fd841-fddc-4178-ba91-d3b0d45826a8\") " Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.128336 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qhb86\" (UniqueName: \"kubernetes.io/projected/922fd841-fddc-4178-ba91-d3b0d45826a8-kube-api-access-qhb86\") pod \"922fd841-fddc-4178-ba91-d3b0d45826a8\" (UID: \"922fd841-fddc-4178-ba91-d3b0d45826a8\") " Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.129394 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-74zqz\" (UniqueName: \"kubernetes.io/projected/219e181e-2dbc-410e-aa4e-e9c843fb2aa9-kube-api-access-74zqz\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.130337 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/922fd841-fddc-4178-ba91-d3b0d45826a8-utilities" (OuterVolumeSpecName: "utilities") pod "922fd841-fddc-4178-ba91-d3b0d45826a8" (UID: "922fd841-fddc-4178-ba91-d3b0d45826a8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.136231 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/922fd841-fddc-4178-ba91-d3b0d45826a8-kube-api-access-qhb86" (OuterVolumeSpecName: "kube-api-access-qhb86") pod "922fd841-fddc-4178-ba91-d3b0d45826a8" (UID: "922fd841-fddc-4178-ba91-d3b0d45826a8"). InnerVolumeSpecName "kube-api-access-qhb86". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.233867 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/922fd841-fddc-4178-ba91-d3b0d45826a8-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.233935 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qhb86\" (UniqueName: \"kubernetes.io/projected/922fd841-fddc-4178-ba91-d3b0d45826a8-kube-api-access-qhb86\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.234050 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.234076 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.286594 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/922fd841-fddc-4178-ba91-d3b0d45826a8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "922fd841-fddc-4178-ba91-d3b0d45826a8" (UID: "922fd841-fddc-4178-ba91-d3b0d45826a8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.322246 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.330883 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.337138 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/922fd841-fddc-4178-ba91-d3b0d45826a8-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.352600 4769 generic.go:334] "Generic (PLEG): container finished" podID="0fdc6e58-5fd1-4801-b734-a47fc74c7bac" containerID="15a262568e1f746af5762362cb62481f8f508ac12cd1c4d4967ef6253d19683f" exitCode=0 Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.352767 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0fdc6e58-5fd1-4801-b734-a47fc74c7bac","Type":"ContainerDied","Data":"15a262568e1f746af5762362cb62481f8f508ac12cd1c4d4967ef6253d19683f"} Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.361037 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-bvfxk" event={"ID":"219e181e-2dbc-410e-aa4e-e9c843fb2aa9","Type":"ContainerDied","Data":"0e1778a3b64d9ee8678afa45a4ac19d9567e8b3dc876bbf9126408bbd8ab2144"} Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.361080 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-bvfxk" Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.361088 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0e1778a3b64d9ee8678afa45a4ac19d9567e8b3dc876bbf9126408bbd8ab2144" Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.369060 4769 generic.go:334] "Generic (PLEG): container finished" podID="922fd841-fddc-4178-ba91-d3b0d45826a8" containerID="b82689e7da0d42083d8e0103ba41079a6eb14a7544c2b45f3ea55fbaa095df30" exitCode=0 Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.369243 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xng9l" event={"ID":"922fd841-fddc-4178-ba91-d3b0d45826a8","Type":"ContainerDied","Data":"b82689e7da0d42083d8e0103ba41079a6eb14a7544c2b45f3ea55fbaa095df30"} Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.369402 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xng9l" event={"ID":"922fd841-fddc-4178-ba91-d3b0d45826a8","Type":"ContainerDied","Data":"5c14b3313e615835173fd5e59af260fd3190b5559db1f8f7bf299963f41a1ad4"} Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.369505 4769 scope.go:117] "RemoveContainer" containerID="b82689e7da0d42083d8e0103ba41079a6eb14a7544c2b45f3ea55fbaa095df30" Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.369831 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xng9l" Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.380758 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-9s2mb" event={"ID":"94da5c0c-3b95-4513-9fcb-6ee861294428","Type":"ContainerDied","Data":"dbdb9cfd8ffb9c36c9056728ea53a2e42503dad4cdc848cf863027d61d07182c"} Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.380809 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dbdb9cfd8ffb9c36c9056728ea53a2e42503dad4cdc848cf863027d61d07182c" Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.380851 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-9s2mb" Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.382538 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.382591 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.422011 4769 scope.go:117] "RemoveContainer" containerID="d74f157ebaac55cacb7b0a73bf061a43eb3e5715ac2d9e69c7ae53710777c361" Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.430574 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-xng9l"] Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.441284 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-xng9l"] Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.587446 4769 scope.go:117] "RemoveContainer" containerID="496d2b64547a1021ab173ee2c375f9f68423abfe0918e8077377ceb4b2954ffe" Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.641915 4769 scope.go:117] "RemoveContainer" containerID="b82689e7da0d42083d8e0103ba41079a6eb14a7544c2b45f3ea55fbaa095df30" Nov 25 10:08:06 crc kubenswrapper[4769]: E1125 10:08:06.642919 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b82689e7da0d42083d8e0103ba41079a6eb14a7544c2b45f3ea55fbaa095df30\": container with ID starting with b82689e7da0d42083d8e0103ba41079a6eb14a7544c2b45f3ea55fbaa095df30 not found: ID does not exist" containerID="b82689e7da0d42083d8e0103ba41079a6eb14a7544c2b45f3ea55fbaa095df30" Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.642997 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b82689e7da0d42083d8e0103ba41079a6eb14a7544c2b45f3ea55fbaa095df30"} err="failed to get container status \"b82689e7da0d42083d8e0103ba41079a6eb14a7544c2b45f3ea55fbaa095df30\": rpc error: code = NotFound desc = could not find container \"b82689e7da0d42083d8e0103ba41079a6eb14a7544c2b45f3ea55fbaa095df30\": container with ID starting with b82689e7da0d42083d8e0103ba41079a6eb14a7544c2b45f3ea55fbaa095df30 not found: ID does not exist" Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.643035 4769 scope.go:117] "RemoveContainer" containerID="d74f157ebaac55cacb7b0a73bf061a43eb3e5715ac2d9e69c7ae53710777c361" Nov 25 10:08:06 crc kubenswrapper[4769]: E1125 10:08:06.644346 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d74f157ebaac55cacb7b0a73bf061a43eb3e5715ac2d9e69c7ae53710777c361\": container with ID starting with d74f157ebaac55cacb7b0a73bf061a43eb3e5715ac2d9e69c7ae53710777c361 not found: ID does not exist" containerID="d74f157ebaac55cacb7b0a73bf061a43eb3e5715ac2d9e69c7ae53710777c361" Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.644380 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d74f157ebaac55cacb7b0a73bf061a43eb3e5715ac2d9e69c7ae53710777c361"} err="failed to get container status \"d74f157ebaac55cacb7b0a73bf061a43eb3e5715ac2d9e69c7ae53710777c361\": rpc error: code = NotFound desc = could not find container \"d74f157ebaac55cacb7b0a73bf061a43eb3e5715ac2d9e69c7ae53710777c361\": container with ID starting with 
d74f157ebaac55cacb7b0a73bf061a43eb3e5715ac2d9e69c7ae53710777c361 not found: ID does not exist" Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.644399 4769 scope.go:117] "RemoveContainer" containerID="496d2b64547a1021ab173ee2c375f9f68423abfe0918e8077377ceb4b2954ffe" Nov 25 10:08:06 crc kubenswrapper[4769]: E1125 10:08:06.645484 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"496d2b64547a1021ab173ee2c375f9f68423abfe0918e8077377ceb4b2954ffe\": container with ID starting with 496d2b64547a1021ab173ee2c375f9f68423abfe0918e8077377ceb4b2954ffe not found: ID does not exist" containerID="496d2b64547a1021ab173ee2c375f9f68423abfe0918e8077377ceb4b2954ffe" Nov 25 10:08:06 crc kubenswrapper[4769]: I1125 10:08:06.645527 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"496d2b64547a1021ab173ee2c375f9f68423abfe0918e8077377ceb4b2954ffe"} err="failed to get container status \"496d2b64547a1021ab173ee2c375f9f68423abfe0918e8077377ceb4b2954ffe\": rpc error: code = NotFound desc = could not find container \"496d2b64547a1021ab173ee2c375f9f68423abfe0918e8077377ceb4b2954ffe\": container with ID starting with 496d2b64547a1021ab173ee2c375f9f68423abfe0918e8077377ceb4b2954ffe not found: ID does not exist" Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.009300 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.009358 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.027969 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-f2a2-account-create-q65xr" Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.064401 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.064477 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.065393 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c5017f20-1e2f-44e6-9b68-f8b64ca42cc2-operator-scripts\") pod \"c5017f20-1e2f-44e6-9b68-f8b64ca42cc2\" (UID: \"c5017f20-1e2f-44e6-9b68-f8b64ca42cc2\") " Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.066219 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sm6xv\" (UniqueName: \"kubernetes.io/projected/c5017f20-1e2f-44e6-9b68-f8b64ca42cc2-kube-api-access-sm6xv\") pod \"c5017f20-1e2f-44e6-9b68-f8b64ca42cc2\" (UID: \"c5017f20-1e2f-44e6-9b68-f8b64ca42cc2\") " Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.072876 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c5017f20-1e2f-44e6-9b68-f8b64ca42cc2-kube-api-access-sm6xv" (OuterVolumeSpecName: "kube-api-access-sm6xv") pod "c5017f20-1e2f-44e6-9b68-f8b64ca42cc2" (UID: "c5017f20-1e2f-44e6-9b68-f8b64ca42cc2"). InnerVolumeSpecName "kube-api-access-sm6xv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.073688 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c5017f20-1e2f-44e6-9b68-f8b64ca42cc2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c5017f20-1e2f-44e6-9b68-f8b64ca42cc2" (UID: "c5017f20-1e2f-44e6-9b68-f8b64ca42cc2"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.145700 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-72k77" Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.174472 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sm6xv\" (UniqueName: \"kubernetes.io/projected/c5017f20-1e2f-44e6-9b68-f8b64ca42cc2-kube-api-access-sm6xv\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.174506 4769 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c5017f20-1e2f-44e6-9b68-f8b64ca42cc2-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.177312 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-b9e5-account-create-8s7mq" Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.182162 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-8f09-account-create-gl77m" Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.275546 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wm95f\" (UniqueName: \"kubernetes.io/projected/a43c3555-7a5d-4824-a913-98e74014c48c-kube-api-access-wm95f\") pod \"a43c3555-7a5d-4824-a913-98e74014c48c\" (UID: \"a43c3555-7a5d-4824-a913-98e74014c48c\") " Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.275706 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hgx65\" (UniqueName: \"kubernetes.io/projected/352ab718-d89c-430d-a7ca-a7ad712d63d6-kube-api-access-hgx65\") pod \"352ab718-d89c-430d-a7ca-a7ad712d63d6\" (UID: \"352ab718-d89c-430d-a7ca-a7ad712d63d6\") " Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.275738 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e0335a31-5586-412d-b8fd-740763f60392-operator-scripts\") pod \"e0335a31-5586-412d-b8fd-740763f60392\" (UID: \"e0335a31-5586-412d-b8fd-740763f60392\") " Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.275924 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a43c3555-7a5d-4824-a913-98e74014c48c-operator-scripts\") pod \"a43c3555-7a5d-4824-a913-98e74014c48c\" (UID: \"a43c3555-7a5d-4824-a913-98e74014c48c\") " Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.276075 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/352ab718-d89c-430d-a7ca-a7ad712d63d6-operator-scripts\") pod \"352ab718-d89c-430d-a7ca-a7ad712d63d6\" (UID: \"352ab718-d89c-430d-a7ca-a7ad712d63d6\") " Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.276130 4769 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-66dsv\" (UniqueName: \"kubernetes.io/projected/e0335a31-5586-412d-b8fd-740763f60392-kube-api-access-66dsv\") pod \"e0335a31-5586-412d-b8fd-740763f60392\" (UID: \"e0335a31-5586-412d-b8fd-740763f60392\") " Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.276617 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e0335a31-5586-412d-b8fd-740763f60392-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e0335a31-5586-412d-b8fd-740763f60392" (UID: "e0335a31-5586-412d-b8fd-740763f60392"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.277275 4769 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e0335a31-5586-412d-b8fd-740763f60392-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.277594 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a43c3555-7a5d-4824-a913-98e74014c48c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a43c3555-7a5d-4824-a913-98e74014c48c" (UID: "a43c3555-7a5d-4824-a913-98e74014c48c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.278128 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/352ab718-d89c-430d-a7ca-a7ad712d63d6-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "352ab718-d89c-430d-a7ca-a7ad712d63d6" (UID: "352ab718-d89c-430d-a7ca-a7ad712d63d6"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.280992 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a43c3555-7a5d-4824-a913-98e74014c48c-kube-api-access-wm95f" (OuterVolumeSpecName: "kube-api-access-wm95f") pod "a43c3555-7a5d-4824-a913-98e74014c48c" (UID: "a43c3555-7a5d-4824-a913-98e74014c48c"). InnerVolumeSpecName "kube-api-access-wm95f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.281550 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e0335a31-5586-412d-b8fd-740763f60392-kube-api-access-66dsv" (OuterVolumeSpecName: "kube-api-access-66dsv") pod "e0335a31-5586-412d-b8fd-740763f60392" (UID: "e0335a31-5586-412d-b8fd-740763f60392"). InnerVolumeSpecName "kube-api-access-66dsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.284338 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/352ab718-d89c-430d-a7ca-a7ad712d63d6-kube-api-access-hgx65" (OuterVolumeSpecName: "kube-api-access-hgx65") pod "352ab718-d89c-430d-a7ca-a7ad712d63d6" (UID: "352ab718-d89c-430d-a7ca-a7ad712d63d6"). InnerVolumeSpecName "kube-api-access-hgx65". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.382059 4769 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/352ab718-d89c-430d-a7ca-a7ad712d63d6-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.382098 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-66dsv\" (UniqueName: \"kubernetes.io/projected/e0335a31-5586-412d-b8fd-740763f60392-kube-api-access-66dsv\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.382129 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wm95f\" (UniqueName: \"kubernetes.io/projected/a43c3555-7a5d-4824-a913-98e74014c48c-kube-api-access-wm95f\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.382143 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hgx65\" (UniqueName: \"kubernetes.io/projected/352ab718-d89c-430d-a7ca-a7ad712d63d6-kube-api-access-hgx65\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.382156 4769 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a43c3555-7a5d-4824-a913-98e74014c48c-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.394093 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-f2a2-account-create-q65xr" event={"ID":"c5017f20-1e2f-44e6-9b68-f8b64ca42cc2","Type":"ContainerDied","Data":"43970803e65f497a4b1cfd8adea59c7cc41359da41c1b5015fd6e95d2ad6a95d"} Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.394180 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="43970803e65f497a4b1cfd8adea59c7cc41359da41c1b5015fd6e95d2ad6a95d" Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.394247 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-f2a2-account-create-q65xr" Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.406037 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-8f09-account-create-gl77m" event={"ID":"a43c3555-7a5d-4824-a913-98e74014c48c","Type":"ContainerDied","Data":"2983a325f197b442fd65ade8176fea5c45dffcc42c1af3af2c0ec020e235f387"} Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.406148 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2983a325f197b442fd65ade8176fea5c45dffcc42c1af3af2c0ec020e235f387" Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.406192 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-8f09-account-create-gl77m" Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.407974 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-b9e5-account-create-8s7mq" Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.407973 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-b9e5-account-create-8s7mq" event={"ID":"352ab718-d89c-430d-a7ca-a7ad712d63d6","Type":"ContainerDied","Data":"45c846cbd03bf76db2d049a87e1eaf7adb6aa1ae84f77dca2dec204481f33818"} Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.408243 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="45c846cbd03bf76db2d049a87e1eaf7adb6aa1ae84f77dca2dec204481f33818" Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.414351 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-72k77" event={"ID":"e0335a31-5586-412d-b8fd-740763f60392","Type":"ContainerDied","Data":"9117eaa71dab486cb037b0ef0093de83bce8d8bbdc4e7c9e7e1bb7988872d52a"} Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.414388 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9117eaa71dab486cb037b0ef0093de83bce8d8bbdc4e7c9e7e1bb7988872d52a" Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.414504 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-72k77" Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.416181 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 25 10:08:07 crc kubenswrapper[4769]: I1125 10:08:07.416224 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 25 10:08:08 crc kubenswrapper[4769]: I1125 10:08:08.257115 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="922fd841-fddc-4178-ba91-d3b0d45826a8" path="/var/lib/kubelet/pods/922fd841-fddc-4178-ba91-d3b0d45826a8/volumes" Nov 25 10:08:08 crc kubenswrapper[4769]: I1125 10:08:08.428831 4769 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 10:08:08 crc kubenswrapper[4769]: I1125 10:08:08.428862 4769 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 10:08:09 crc kubenswrapper[4769]: I1125 10:08:09.696079 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-779556746f-vswpj" Nov 25 10:08:09 crc kubenswrapper[4769]: I1125 10:08:09.751893 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-7687d6c66-wfxhv"] Nov 25 10:08:09 crc kubenswrapper[4769]: I1125 10:08:09.752138 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-engine-7687d6c66-wfxhv" podUID="4a8a1143-f843-4007-bdc4-095a5047ca69" containerName="heat-engine" containerID="cri-o://43b0c1f50714851eab120856f7380219c1d5f23176617c933003637600370eb3" gracePeriod=60 Nov 25 10:08:10 crc kubenswrapper[4769]: I1125 10:08:10.298534 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 25 10:08:10 crc kubenswrapper[4769]: I1125 10:08:10.298863 4769 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 10:08:10 crc kubenswrapper[4769]: I1125 10:08:10.301212 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 25 10:08:10 crc kubenswrapper[4769]: I1125 10:08:10.338528 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openstack/glance-default-external-api-0" Nov 25 10:08:10 crc kubenswrapper[4769]: I1125 10:08:10.338633 4769 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 10:08:10 crc kubenswrapper[4769]: I1125 10:08:10.476624 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 25 10:08:12 crc kubenswrapper[4769]: I1125 10:08:12.067645 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-q6p8q"] Nov 25 10:08:12 crc kubenswrapper[4769]: E1125 10:08:12.068390 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="922fd841-fddc-4178-ba91-d3b0d45826a8" containerName="registry-server" Nov 25 10:08:12 crc kubenswrapper[4769]: I1125 10:08:12.068404 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="922fd841-fddc-4178-ba91-d3b0d45826a8" containerName="registry-server" Nov 25 10:08:12 crc kubenswrapper[4769]: E1125 10:08:12.068416 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="352ab718-d89c-430d-a7ca-a7ad712d63d6" containerName="mariadb-account-create" Nov 25 10:08:12 crc kubenswrapper[4769]: I1125 10:08:12.068422 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="352ab718-d89c-430d-a7ca-a7ad712d63d6" containerName="mariadb-account-create" Nov 25 10:08:12 crc kubenswrapper[4769]: E1125 10:08:12.068450 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a43c3555-7a5d-4824-a913-98e74014c48c" containerName="mariadb-account-create" Nov 25 10:08:12 crc kubenswrapper[4769]: I1125 10:08:12.068457 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="a43c3555-7a5d-4824-a913-98e74014c48c" containerName="mariadb-account-create" Nov 25 10:08:12 crc kubenswrapper[4769]: E1125 10:08:12.068471 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="922fd841-fddc-4178-ba91-d3b0d45826a8" containerName="extract-utilities" Nov 25 10:08:12 crc kubenswrapper[4769]: I1125 10:08:12.068477 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="922fd841-fddc-4178-ba91-d3b0d45826a8" containerName="extract-utilities" Nov 25 10:08:12 crc kubenswrapper[4769]: E1125 10:08:12.068495 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5017f20-1e2f-44e6-9b68-f8b64ca42cc2" containerName="mariadb-account-create" Nov 25 10:08:12 crc kubenswrapper[4769]: I1125 10:08:12.068500 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5017f20-1e2f-44e6-9b68-f8b64ca42cc2" containerName="mariadb-account-create" Nov 25 10:08:12 crc kubenswrapper[4769]: E1125 10:08:12.068510 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0335a31-5586-412d-b8fd-740763f60392" containerName="mariadb-database-create" Nov 25 10:08:12 crc kubenswrapper[4769]: I1125 10:08:12.068517 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0335a31-5586-412d-b8fd-740763f60392" containerName="mariadb-database-create" Nov 25 10:08:12 crc kubenswrapper[4769]: E1125 10:08:12.068534 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94da5c0c-3b95-4513-9fcb-6ee861294428" containerName="mariadb-database-create" Nov 25 10:08:12 crc kubenswrapper[4769]: I1125 10:08:12.068541 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="94da5c0c-3b95-4513-9fcb-6ee861294428" containerName="mariadb-database-create" Nov 25 10:08:12 crc kubenswrapper[4769]: E1125 10:08:12.068549 4769 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="219e181e-2dbc-410e-aa4e-e9c843fb2aa9" containerName="mariadb-database-create" Nov 25 10:08:12 crc kubenswrapper[4769]: I1125 10:08:12.068554 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="219e181e-2dbc-410e-aa4e-e9c843fb2aa9" containerName="mariadb-database-create" Nov 25 10:08:12 crc kubenswrapper[4769]: E1125 10:08:12.068571 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="922fd841-fddc-4178-ba91-d3b0d45826a8" containerName="extract-content" Nov 25 10:08:12 crc kubenswrapper[4769]: I1125 10:08:12.068576 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="922fd841-fddc-4178-ba91-d3b0d45826a8" containerName="extract-content" Nov 25 10:08:12 crc kubenswrapper[4769]: I1125 10:08:12.068773 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="e0335a31-5586-412d-b8fd-740763f60392" containerName="mariadb-database-create" Nov 25 10:08:12 crc kubenswrapper[4769]: I1125 10:08:12.068785 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5017f20-1e2f-44e6-9b68-f8b64ca42cc2" containerName="mariadb-account-create" Nov 25 10:08:12 crc kubenswrapper[4769]: I1125 10:08:12.068798 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="96b52ba9-e2dc-4a75-8001-b2ce075634e7" containerName="heat-api" Nov 25 10:08:12 crc kubenswrapper[4769]: I1125 10:08:12.068806 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="94da5c0c-3b95-4513-9fcb-6ee861294428" containerName="mariadb-database-create" Nov 25 10:08:12 crc kubenswrapper[4769]: I1125 10:08:12.068817 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="219e181e-2dbc-410e-aa4e-e9c843fb2aa9" containerName="mariadb-database-create" Nov 25 10:08:12 crc kubenswrapper[4769]: I1125 10:08:12.068830 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="352ab718-d89c-430d-a7ca-a7ad712d63d6" containerName="mariadb-account-create" Nov 25 10:08:12 crc kubenswrapper[4769]: I1125 10:08:12.068840 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="922fd841-fddc-4178-ba91-d3b0d45826a8" containerName="registry-server" Nov 25 10:08:12 crc kubenswrapper[4769]: I1125 10:08:12.068852 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="a43c3555-7a5d-4824-a913-98e74014c48c" containerName="mariadb-account-create" Nov 25 10:08:12 crc kubenswrapper[4769]: I1125 10:08:12.069694 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-q6p8q" Nov 25 10:08:12 crc kubenswrapper[4769]: I1125 10:08:12.072022 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Nov 25 10:08:12 crc kubenswrapper[4769]: I1125 10:08:12.072835 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 25 10:08:12 crc kubenswrapper[4769]: I1125 10:08:12.073090 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-4rmv5" Nov 25 10:08:12 crc kubenswrapper[4769]: I1125 10:08:12.094038 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-q6p8q"] Nov 25 10:08:12 crc kubenswrapper[4769]: I1125 10:08:12.173480 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/69ff4eda-4f38-4778-9e92-ceb7005f8420-scripts\") pod \"nova-cell0-conductor-db-sync-q6p8q\" (UID: \"69ff4eda-4f38-4778-9e92-ceb7005f8420\") " pod="openstack/nova-cell0-conductor-db-sync-q6p8q" Nov 25 10:08:12 crc kubenswrapper[4769]: I1125 10:08:12.173557 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69ff4eda-4f38-4778-9e92-ceb7005f8420-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-q6p8q\" (UID: \"69ff4eda-4f38-4778-9e92-ceb7005f8420\") " pod="openstack/nova-cell0-conductor-db-sync-q6p8q" Nov 25 10:08:12 crc kubenswrapper[4769]: I1125 10:08:12.173590 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69ff4eda-4f38-4778-9e92-ceb7005f8420-config-data\") pod \"nova-cell0-conductor-db-sync-q6p8q\" (UID: \"69ff4eda-4f38-4778-9e92-ceb7005f8420\") " pod="openstack/nova-cell0-conductor-db-sync-q6p8q" Nov 25 10:08:12 crc kubenswrapper[4769]: I1125 10:08:12.173610 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xc9c4\" (UniqueName: \"kubernetes.io/projected/69ff4eda-4f38-4778-9e92-ceb7005f8420-kube-api-access-xc9c4\") pod \"nova-cell0-conductor-db-sync-q6p8q\" (UID: \"69ff4eda-4f38-4778-9e92-ceb7005f8420\") " pod="openstack/nova-cell0-conductor-db-sync-q6p8q" Nov 25 10:08:12 crc kubenswrapper[4769]: I1125 10:08:12.275618 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/69ff4eda-4f38-4778-9e92-ceb7005f8420-scripts\") pod \"nova-cell0-conductor-db-sync-q6p8q\" (UID: \"69ff4eda-4f38-4778-9e92-ceb7005f8420\") " pod="openstack/nova-cell0-conductor-db-sync-q6p8q" Nov 25 10:08:12 crc kubenswrapper[4769]: I1125 10:08:12.275691 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69ff4eda-4f38-4778-9e92-ceb7005f8420-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-q6p8q\" (UID: \"69ff4eda-4f38-4778-9e92-ceb7005f8420\") " pod="openstack/nova-cell0-conductor-db-sync-q6p8q" Nov 25 10:08:12 crc kubenswrapper[4769]: I1125 10:08:12.275723 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69ff4eda-4f38-4778-9e92-ceb7005f8420-config-data\") pod \"nova-cell0-conductor-db-sync-q6p8q\" (UID: 
\"69ff4eda-4f38-4778-9e92-ceb7005f8420\") " pod="openstack/nova-cell0-conductor-db-sync-q6p8q" Nov 25 10:08:12 crc kubenswrapper[4769]: I1125 10:08:12.275742 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xc9c4\" (UniqueName: \"kubernetes.io/projected/69ff4eda-4f38-4778-9e92-ceb7005f8420-kube-api-access-xc9c4\") pod \"nova-cell0-conductor-db-sync-q6p8q\" (UID: \"69ff4eda-4f38-4778-9e92-ceb7005f8420\") " pod="openstack/nova-cell0-conductor-db-sync-q6p8q" Nov 25 10:08:12 crc kubenswrapper[4769]: I1125 10:08:12.277887 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 25 10:08:12 crc kubenswrapper[4769]: I1125 10:08:12.278120 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Nov 25 10:08:12 crc kubenswrapper[4769]: I1125 10:08:12.289081 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69ff4eda-4f38-4778-9e92-ceb7005f8420-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-q6p8q\" (UID: \"69ff4eda-4f38-4778-9e92-ceb7005f8420\") " pod="openstack/nova-cell0-conductor-db-sync-q6p8q" Nov 25 10:08:12 crc kubenswrapper[4769]: I1125 10:08:12.290259 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/69ff4eda-4f38-4778-9e92-ceb7005f8420-scripts\") pod \"nova-cell0-conductor-db-sync-q6p8q\" (UID: \"69ff4eda-4f38-4778-9e92-ceb7005f8420\") " pod="openstack/nova-cell0-conductor-db-sync-q6p8q" Nov 25 10:08:12 crc kubenswrapper[4769]: I1125 10:08:12.294517 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69ff4eda-4f38-4778-9e92-ceb7005f8420-config-data\") pod \"nova-cell0-conductor-db-sync-q6p8q\" (UID: \"69ff4eda-4f38-4778-9e92-ceb7005f8420\") " pod="openstack/nova-cell0-conductor-db-sync-q6p8q" Nov 25 10:08:12 crc kubenswrapper[4769]: I1125 10:08:12.296822 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xc9c4\" (UniqueName: \"kubernetes.io/projected/69ff4eda-4f38-4778-9e92-ceb7005f8420-kube-api-access-xc9c4\") pod \"nova-cell0-conductor-db-sync-q6p8q\" (UID: \"69ff4eda-4f38-4778-9e92-ceb7005f8420\") " pod="openstack/nova-cell0-conductor-db-sync-q6p8q" Nov 25 10:08:12 crc kubenswrapper[4769]: I1125 10:08:12.396472 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-4rmv5" Nov 25 10:08:12 crc kubenswrapper[4769]: I1125 10:08:12.404478 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-q6p8q" Nov 25 10:08:12 crc kubenswrapper[4769]: I1125 10:08:12.955351 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-q6p8q"] Nov 25 10:08:12 crc kubenswrapper[4769]: W1125 10:08:12.976540 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod69ff4eda_4f38_4778_9e92_ceb7005f8420.slice/crio-03196d26015af06428cfc4391c6e662950991068564d1107905a4216de0962df WatchSource:0}: Error finding container 03196d26015af06428cfc4391c6e662950991068564d1107905a4216de0962df: Status 404 returned error can't find the container with id 03196d26015af06428cfc4391c6e662950991068564d1107905a4216de0962df Nov 25 10:08:13 crc kubenswrapper[4769]: I1125 10:08:13.509520 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-q6p8q" event={"ID":"69ff4eda-4f38-4778-9e92-ceb7005f8420","Type":"ContainerStarted","Data":"03196d26015af06428cfc4391c6e662950991068564d1107905a4216de0962df"} Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.445955 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.552865 4769 generic.go:334] "Generic (PLEG): container finished" podID="0fdc6e58-5fd1-4801-b734-a47fc74c7bac" containerID="4897b8c5e6af0a24e9d756676a48742fc4d7894b7cde97f9333dc30731e5d13f" exitCode=0 Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.552917 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0fdc6e58-5fd1-4801-b734-a47fc74c7bac","Type":"ContainerDied","Data":"4897b8c5e6af0a24e9d756676a48742fc4d7894b7cde97f9333dc30731e5d13f"} Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.552946 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0fdc6e58-5fd1-4801-b734-a47fc74c7bac","Type":"ContainerDied","Data":"5ebff034e341b2b0db68624a7eaa8fb926a63d2a778a7d2303e5e04690095a1f"} Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.552980 4769 scope.go:117] "RemoveContainer" containerID="03128a541c8c3ac942e56adf5b7a1225b597d4361e13a587d9ea9ddd02b16a5d" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.553145 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.570452 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0fdc6e58-5fd1-4801-b734-a47fc74c7bac-config-data\") pod \"0fdc6e58-5fd1-4801-b734-a47fc74c7bac\" (UID: \"0fdc6e58-5fd1-4801-b734-a47fc74c7bac\") " Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.570583 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0fdc6e58-5fd1-4801-b734-a47fc74c7bac-scripts\") pod \"0fdc6e58-5fd1-4801-b734-a47fc74c7bac\" (UID: \"0fdc6e58-5fd1-4801-b734-a47fc74c7bac\") " Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.570751 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0fdc6e58-5fd1-4801-b734-a47fc74c7bac-sg-core-conf-yaml\") pod \"0fdc6e58-5fd1-4801-b734-a47fc74c7bac\" (UID: \"0fdc6e58-5fd1-4801-b734-a47fc74c7bac\") " Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.570787 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0fdc6e58-5fd1-4801-b734-a47fc74c7bac-log-httpd\") pod \"0fdc6e58-5fd1-4801-b734-a47fc74c7bac\" (UID: \"0fdc6e58-5fd1-4801-b734-a47fc74c7bac\") " Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.570829 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0fdc6e58-5fd1-4801-b734-a47fc74c7bac-combined-ca-bundle\") pod \"0fdc6e58-5fd1-4801-b734-a47fc74c7bac\" (UID: \"0fdc6e58-5fd1-4801-b734-a47fc74c7bac\") " Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.570876 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0fdc6e58-5fd1-4801-b734-a47fc74c7bac-run-httpd\") pod \"0fdc6e58-5fd1-4801-b734-a47fc74c7bac\" (UID: \"0fdc6e58-5fd1-4801-b734-a47fc74c7bac\") " Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.570979 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bb4sw\" (UniqueName: \"kubernetes.io/projected/0fdc6e58-5fd1-4801-b734-a47fc74c7bac-kube-api-access-bb4sw\") pod \"0fdc6e58-5fd1-4801-b734-a47fc74c7bac\" (UID: \"0fdc6e58-5fd1-4801-b734-a47fc74c7bac\") " Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.572568 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0fdc6e58-5fd1-4801-b734-a47fc74c7bac-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "0fdc6e58-5fd1-4801-b734-a47fc74c7bac" (UID: "0fdc6e58-5fd1-4801-b734-a47fc74c7bac"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.573033 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0fdc6e58-5fd1-4801-b734-a47fc74c7bac-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "0fdc6e58-5fd1-4801-b734-a47fc74c7bac" (UID: "0fdc6e58-5fd1-4801-b734-a47fc74c7bac"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.578194 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0fdc6e58-5fd1-4801-b734-a47fc74c7bac-scripts" (OuterVolumeSpecName: "scripts") pod "0fdc6e58-5fd1-4801-b734-a47fc74c7bac" (UID: "0fdc6e58-5fd1-4801-b734-a47fc74c7bac"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.580116 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0fdc6e58-5fd1-4801-b734-a47fc74c7bac-kube-api-access-bb4sw" (OuterVolumeSpecName: "kube-api-access-bb4sw") pod "0fdc6e58-5fd1-4801-b734-a47fc74c7bac" (UID: "0fdc6e58-5fd1-4801-b734-a47fc74c7bac"). InnerVolumeSpecName "kube-api-access-bb4sw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.581415 4769 scope.go:117] "RemoveContainer" containerID="126ea340f0d7a0a4eff280595a8d09c3af7c163104cf6f7a8f1cc73b26b8dc0d" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.620196 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0fdc6e58-5fd1-4801-b734-a47fc74c7bac-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "0fdc6e58-5fd1-4801-b734-a47fc74c7bac" (UID: "0fdc6e58-5fd1-4801-b734-a47fc74c7bac"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.674351 4769 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0fdc6e58-5fd1-4801-b734-a47fc74c7bac-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.674605 4769 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0fdc6e58-5fd1-4801-b734-a47fc74c7bac-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.674668 4769 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0fdc6e58-5fd1-4801-b734-a47fc74c7bac-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.674750 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bb4sw\" (UniqueName: \"kubernetes.io/projected/0fdc6e58-5fd1-4801-b734-a47fc74c7bac-kube-api-access-bb4sw\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.674814 4769 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0fdc6e58-5fd1-4801-b734-a47fc74c7bac-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.686299 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0fdc6e58-5fd1-4801-b734-a47fc74c7bac-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0fdc6e58-5fd1-4801-b734-a47fc74c7bac" (UID: "0fdc6e58-5fd1-4801-b734-a47fc74c7bac"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.700637 4769 scope.go:117] "RemoveContainer" containerID="15a262568e1f746af5762362cb62481f8f508ac12cd1c4d4967ef6253d19683f" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.732082 4769 scope.go:117] "RemoveContainer" containerID="4897b8c5e6af0a24e9d756676a48742fc4d7894b7cde97f9333dc30731e5d13f" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.741808 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0fdc6e58-5fd1-4801-b734-a47fc74c7bac-config-data" (OuterVolumeSpecName: "config-data") pod "0fdc6e58-5fd1-4801-b734-a47fc74c7bac" (UID: "0fdc6e58-5fd1-4801-b734-a47fc74c7bac"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.777122 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0fdc6e58-5fd1-4801-b734-a47fc74c7bac-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.777156 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0fdc6e58-5fd1-4801-b734-a47fc74c7bac-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.777737 4769 scope.go:117] "RemoveContainer" containerID="03128a541c8c3ac942e56adf5b7a1225b597d4361e13a587d9ea9ddd02b16a5d" Nov 25 10:08:15 crc kubenswrapper[4769]: E1125 10:08:15.778418 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"03128a541c8c3ac942e56adf5b7a1225b597d4361e13a587d9ea9ddd02b16a5d\": container with ID starting with 03128a541c8c3ac942e56adf5b7a1225b597d4361e13a587d9ea9ddd02b16a5d not found: ID does not exist" containerID="03128a541c8c3ac942e56adf5b7a1225b597d4361e13a587d9ea9ddd02b16a5d" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.778575 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"03128a541c8c3ac942e56adf5b7a1225b597d4361e13a587d9ea9ddd02b16a5d"} err="failed to get container status \"03128a541c8c3ac942e56adf5b7a1225b597d4361e13a587d9ea9ddd02b16a5d\": rpc error: code = NotFound desc = could not find container \"03128a541c8c3ac942e56adf5b7a1225b597d4361e13a587d9ea9ddd02b16a5d\": container with ID starting with 03128a541c8c3ac942e56adf5b7a1225b597d4361e13a587d9ea9ddd02b16a5d not found: ID does not exist" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.778814 4769 scope.go:117] "RemoveContainer" containerID="126ea340f0d7a0a4eff280595a8d09c3af7c163104cf6f7a8f1cc73b26b8dc0d" Nov 25 10:08:15 crc kubenswrapper[4769]: E1125 10:08:15.780899 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"126ea340f0d7a0a4eff280595a8d09c3af7c163104cf6f7a8f1cc73b26b8dc0d\": container with ID starting with 126ea340f0d7a0a4eff280595a8d09c3af7c163104cf6f7a8f1cc73b26b8dc0d not found: ID does not exist" containerID="126ea340f0d7a0a4eff280595a8d09c3af7c163104cf6f7a8f1cc73b26b8dc0d" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.780941 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"126ea340f0d7a0a4eff280595a8d09c3af7c163104cf6f7a8f1cc73b26b8dc0d"} err="failed to get container status 
\"126ea340f0d7a0a4eff280595a8d09c3af7c163104cf6f7a8f1cc73b26b8dc0d\": rpc error: code = NotFound desc = could not find container \"126ea340f0d7a0a4eff280595a8d09c3af7c163104cf6f7a8f1cc73b26b8dc0d\": container with ID starting with 126ea340f0d7a0a4eff280595a8d09c3af7c163104cf6f7a8f1cc73b26b8dc0d not found: ID does not exist" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.780989 4769 scope.go:117] "RemoveContainer" containerID="15a262568e1f746af5762362cb62481f8f508ac12cd1c4d4967ef6253d19683f" Nov 25 10:08:15 crc kubenswrapper[4769]: E1125 10:08:15.781446 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"15a262568e1f746af5762362cb62481f8f508ac12cd1c4d4967ef6253d19683f\": container with ID starting with 15a262568e1f746af5762362cb62481f8f508ac12cd1c4d4967ef6253d19683f not found: ID does not exist" containerID="15a262568e1f746af5762362cb62481f8f508ac12cd1c4d4967ef6253d19683f" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.781510 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"15a262568e1f746af5762362cb62481f8f508ac12cd1c4d4967ef6253d19683f"} err="failed to get container status \"15a262568e1f746af5762362cb62481f8f508ac12cd1c4d4967ef6253d19683f\": rpc error: code = NotFound desc = could not find container \"15a262568e1f746af5762362cb62481f8f508ac12cd1c4d4967ef6253d19683f\": container with ID starting with 15a262568e1f746af5762362cb62481f8f508ac12cd1c4d4967ef6253d19683f not found: ID does not exist" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.781545 4769 scope.go:117] "RemoveContainer" containerID="4897b8c5e6af0a24e9d756676a48742fc4d7894b7cde97f9333dc30731e5d13f" Nov 25 10:08:15 crc kubenswrapper[4769]: E1125 10:08:15.782396 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4897b8c5e6af0a24e9d756676a48742fc4d7894b7cde97f9333dc30731e5d13f\": container with ID starting with 4897b8c5e6af0a24e9d756676a48742fc4d7894b7cde97f9333dc30731e5d13f not found: ID does not exist" containerID="4897b8c5e6af0a24e9d756676a48742fc4d7894b7cde97f9333dc30731e5d13f" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.782600 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4897b8c5e6af0a24e9d756676a48742fc4d7894b7cde97f9333dc30731e5d13f"} err="failed to get container status \"4897b8c5e6af0a24e9d756676a48742fc4d7894b7cde97f9333dc30731e5d13f\": rpc error: code = NotFound desc = could not find container \"4897b8c5e6af0a24e9d756676a48742fc4d7894b7cde97f9333dc30731e5d13f\": container with ID starting with 4897b8c5e6af0a24e9d756676a48742fc4d7894b7cde97f9333dc30731e5d13f not found: ID does not exist" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.908036 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.927056 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.946925 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:08:15 crc kubenswrapper[4769]: E1125 10:08:15.947515 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0fdc6e58-5fd1-4801-b734-a47fc74c7bac" containerName="sg-core" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.947536 4769 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="0fdc6e58-5fd1-4801-b734-a47fc74c7bac" containerName="sg-core" Nov 25 10:08:15 crc kubenswrapper[4769]: E1125 10:08:15.947572 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0fdc6e58-5fd1-4801-b734-a47fc74c7bac" containerName="ceilometer-central-agent" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.947581 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="0fdc6e58-5fd1-4801-b734-a47fc74c7bac" containerName="ceilometer-central-agent" Nov 25 10:08:15 crc kubenswrapper[4769]: E1125 10:08:15.947609 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0fdc6e58-5fd1-4801-b734-a47fc74c7bac" containerName="proxy-httpd" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.947615 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="0fdc6e58-5fd1-4801-b734-a47fc74c7bac" containerName="proxy-httpd" Nov 25 10:08:15 crc kubenswrapper[4769]: E1125 10:08:15.947627 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0fdc6e58-5fd1-4801-b734-a47fc74c7bac" containerName="ceilometer-notification-agent" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.947633 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="0fdc6e58-5fd1-4801-b734-a47fc74c7bac" containerName="ceilometer-notification-agent" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.947842 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="0fdc6e58-5fd1-4801-b734-a47fc74c7bac" containerName="ceilometer-central-agent" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.947864 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="0fdc6e58-5fd1-4801-b734-a47fc74c7bac" containerName="proxy-httpd" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.947873 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="0fdc6e58-5fd1-4801-b734-a47fc74c7bac" containerName="ceilometer-notification-agent" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.947888 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="0fdc6e58-5fd1-4801-b734-a47fc74c7bac" containerName="sg-core" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.950187 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.955689 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.956391 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.964401 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.986434 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5bd31c36-b250-469b-b700-56617d826d79-log-httpd\") pod \"ceilometer-0\" (UID: \"5bd31c36-b250-469b-b700-56617d826d79\") " pod="openstack/ceilometer-0" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.986492 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5bd31c36-b250-469b-b700-56617d826d79-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5bd31c36-b250-469b-b700-56617d826d79\") " pod="openstack/ceilometer-0" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.986537 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r78zz\" (UniqueName: \"kubernetes.io/projected/5bd31c36-b250-469b-b700-56617d826d79-kube-api-access-r78zz\") pod \"ceilometer-0\" (UID: \"5bd31c36-b250-469b-b700-56617d826d79\") " pod="openstack/ceilometer-0" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.986557 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5bd31c36-b250-469b-b700-56617d826d79-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5bd31c36-b250-469b-b700-56617d826d79\") " pod="openstack/ceilometer-0" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.986661 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5bd31c36-b250-469b-b700-56617d826d79-config-data\") pod \"ceilometer-0\" (UID: \"5bd31c36-b250-469b-b700-56617d826d79\") " pod="openstack/ceilometer-0" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.986698 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5bd31c36-b250-469b-b700-56617d826d79-run-httpd\") pod \"ceilometer-0\" (UID: \"5bd31c36-b250-469b-b700-56617d826d79\") " pod="openstack/ceilometer-0" Nov 25 10:08:15 crc kubenswrapper[4769]: I1125 10:08:15.986872 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5bd31c36-b250-469b-b700-56617d826d79-scripts\") pod \"ceilometer-0\" (UID: \"5bd31c36-b250-469b-b700-56617d826d79\") " pod="openstack/ceilometer-0" Nov 25 10:08:16 crc kubenswrapper[4769]: I1125 10:08:16.088720 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5bd31c36-b250-469b-b700-56617d826d79-log-httpd\") pod \"ceilometer-0\" (UID: \"5bd31c36-b250-469b-b700-56617d826d79\") " pod="openstack/ceilometer-0" Nov 25 10:08:16 crc kubenswrapper[4769]: I1125 10:08:16.088776 4769 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5bd31c36-b250-469b-b700-56617d826d79-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5bd31c36-b250-469b-b700-56617d826d79\") " pod="openstack/ceilometer-0" Nov 25 10:08:16 crc kubenswrapper[4769]: I1125 10:08:16.088816 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r78zz\" (UniqueName: \"kubernetes.io/projected/5bd31c36-b250-469b-b700-56617d826d79-kube-api-access-r78zz\") pod \"ceilometer-0\" (UID: \"5bd31c36-b250-469b-b700-56617d826d79\") " pod="openstack/ceilometer-0" Nov 25 10:08:16 crc kubenswrapper[4769]: I1125 10:08:16.088835 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5bd31c36-b250-469b-b700-56617d826d79-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5bd31c36-b250-469b-b700-56617d826d79\") " pod="openstack/ceilometer-0" Nov 25 10:08:16 crc kubenswrapper[4769]: I1125 10:08:16.088899 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5bd31c36-b250-469b-b700-56617d826d79-config-data\") pod \"ceilometer-0\" (UID: \"5bd31c36-b250-469b-b700-56617d826d79\") " pod="openstack/ceilometer-0" Nov 25 10:08:16 crc kubenswrapper[4769]: I1125 10:08:16.088928 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5bd31c36-b250-469b-b700-56617d826d79-run-httpd\") pod \"ceilometer-0\" (UID: \"5bd31c36-b250-469b-b700-56617d826d79\") " pod="openstack/ceilometer-0" Nov 25 10:08:16 crc kubenswrapper[4769]: I1125 10:08:16.089067 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5bd31c36-b250-469b-b700-56617d826d79-scripts\") pod \"ceilometer-0\" (UID: \"5bd31c36-b250-469b-b700-56617d826d79\") " pod="openstack/ceilometer-0" Nov 25 10:08:16 crc kubenswrapper[4769]: I1125 10:08:16.089481 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5bd31c36-b250-469b-b700-56617d826d79-log-httpd\") pod \"ceilometer-0\" (UID: \"5bd31c36-b250-469b-b700-56617d826d79\") " pod="openstack/ceilometer-0" Nov 25 10:08:16 crc kubenswrapper[4769]: I1125 10:08:16.094010 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5bd31c36-b250-469b-b700-56617d826d79-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5bd31c36-b250-469b-b700-56617d826d79\") " pod="openstack/ceilometer-0" Nov 25 10:08:16 crc kubenswrapper[4769]: I1125 10:08:16.094623 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5bd31c36-b250-469b-b700-56617d826d79-run-httpd\") pod \"ceilometer-0\" (UID: \"5bd31c36-b250-469b-b700-56617d826d79\") " pod="openstack/ceilometer-0" Nov 25 10:08:16 crc kubenswrapper[4769]: I1125 10:08:16.094938 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5bd31c36-b250-469b-b700-56617d826d79-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5bd31c36-b250-469b-b700-56617d826d79\") " pod="openstack/ceilometer-0" Nov 25 10:08:16 crc kubenswrapper[4769]: I1125 10:08:16.095114 4769 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5bd31c36-b250-469b-b700-56617d826d79-scripts\") pod \"ceilometer-0\" (UID: \"5bd31c36-b250-469b-b700-56617d826d79\") " pod="openstack/ceilometer-0" Nov 25 10:08:16 crc kubenswrapper[4769]: I1125 10:08:16.097753 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5bd31c36-b250-469b-b700-56617d826d79-config-data\") pod \"ceilometer-0\" (UID: \"5bd31c36-b250-469b-b700-56617d826d79\") " pod="openstack/ceilometer-0" Nov 25 10:08:16 crc kubenswrapper[4769]: I1125 10:08:16.115885 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r78zz\" (UniqueName: \"kubernetes.io/projected/5bd31c36-b250-469b-b700-56617d826d79-kube-api-access-r78zz\") pod \"ceilometer-0\" (UID: \"5bd31c36-b250-469b-b700-56617d826d79\") " pod="openstack/ceilometer-0" Nov 25 10:08:16 crc kubenswrapper[4769]: I1125 10:08:16.258310 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0fdc6e58-5fd1-4801-b734-a47fc74c7bac" path="/var/lib/kubelet/pods/0fdc6e58-5fd1-4801-b734-a47fc74c7bac/volumes" Nov 25 10:08:16 crc kubenswrapper[4769]: I1125 10:08:16.284511 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:08:16 crc kubenswrapper[4769]: I1125 10:08:16.788640 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:08:16 crc kubenswrapper[4769]: W1125 10:08:16.792122 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5bd31c36_b250_469b_b700_56617d826d79.slice/crio-da32657a82a271aa12206af83b269d293075fef2ce0ccb323a79d0fb6898bf91 WatchSource:0}: Error finding container da32657a82a271aa12206af83b269d293075fef2ce0ccb323a79d0fb6898bf91: Status 404 returned error can't find the container with id da32657a82a271aa12206af83b269d293075fef2ce0ccb323a79d0fb6898bf91 Nov 25 10:08:17 crc kubenswrapper[4769]: I1125 10:08:17.627875 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5bd31c36-b250-469b-b700-56617d826d79","Type":"ContainerStarted","Data":"da32657a82a271aa12206af83b269d293075fef2ce0ccb323a79d0fb6898bf91"} Nov 25 10:08:19 crc kubenswrapper[4769]: E1125 10:08:19.614158 4769 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 43b0c1f50714851eab120856f7380219c1d5f23176617c933003637600370eb3 is running failed: container process not found" containerID="43b0c1f50714851eab120856f7380219c1d5f23176617c933003637600370eb3" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Nov 25 10:08:19 crc kubenswrapper[4769]: E1125 10:08:19.616011 4769 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 43b0c1f50714851eab120856f7380219c1d5f23176617c933003637600370eb3 is running failed: container process not found" containerID="43b0c1f50714851eab120856f7380219c1d5f23176617c933003637600370eb3" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Nov 25 10:08:19 crc kubenswrapper[4769]: E1125 10:08:19.617258 4769 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 43b0c1f50714851eab120856f7380219c1d5f23176617c933003637600370eb3 is running 
failed: container process not found" containerID="43b0c1f50714851eab120856f7380219c1d5f23176617c933003637600370eb3" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Nov 25 10:08:19 crc kubenswrapper[4769]: E1125 10:08:19.617326 4769 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 43b0c1f50714851eab120856f7380219c1d5f23176617c933003637600370eb3 is running failed: container process not found" probeType="Readiness" pod="openstack/heat-engine-7687d6c66-wfxhv" podUID="4a8a1143-f843-4007-bdc4-095a5047ca69" containerName="heat-engine" Nov 25 10:08:19 crc kubenswrapper[4769]: I1125 10:08:19.637848 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:08:19 crc kubenswrapper[4769]: I1125 10:08:19.659349 4769 generic.go:334] "Generic (PLEG): container finished" podID="4a8a1143-f843-4007-bdc4-095a5047ca69" containerID="43b0c1f50714851eab120856f7380219c1d5f23176617c933003637600370eb3" exitCode=0 Nov 25 10:08:19 crc kubenswrapper[4769]: I1125 10:08:19.659393 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-7687d6c66-wfxhv" event={"ID":"4a8a1143-f843-4007-bdc4-095a5047ca69","Type":"ContainerDied","Data":"43b0c1f50714851eab120856f7380219c1d5f23176617c933003637600370eb3"} Nov 25 10:08:20 crc kubenswrapper[4769]: I1125 10:08:20.442277 4769 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","pod994253aa-3f23-4bce-839a-9193a22d976e"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort pod994253aa-3f23-4bce-839a-9193a22d976e] : Timed out while waiting for systemd to remove kubepods-besteffort-pod994253aa_3f23_4bce_839a_9193a22d976e.slice" Nov 25 10:08:20 crc kubenswrapper[4769]: E1125 10:08:20.442345 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to delete cgroup paths for [kubepods besteffort pod994253aa-3f23-4bce-839a-9193a22d976e] : unable to destroy cgroup paths for cgroup [kubepods besteffort pod994253aa-3f23-4bce-839a-9193a22d976e] : Timed out while waiting for systemd to remove kubepods-besteffort-pod994253aa_3f23_4bce_839a_9193a22d976e.slice" pod="openstack/heat-api-86c99fcffb-tfxvs" podUID="994253aa-3f23-4bce-839a-9193a22d976e" Nov 25 10:08:20 crc kubenswrapper[4769]: I1125 10:08:20.671181 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-86c99fcffb-tfxvs" Nov 25 10:08:20 crc kubenswrapper[4769]: I1125 10:08:20.706061 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-86c99fcffb-tfxvs"] Nov 25 10:08:20 crc kubenswrapper[4769]: I1125 10:08:20.722328 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-86c99fcffb-tfxvs"] Nov 25 10:08:22 crc kubenswrapper[4769]: I1125 10:08:22.266582 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="994253aa-3f23-4bce-839a-9193a22d976e" path="/var/lib/kubelet/pods/994253aa-3f23-4bce-839a-9193a22d976e/volumes" Nov 25 10:08:22 crc kubenswrapper[4769]: I1125 10:08:22.696117 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-7687d6c66-wfxhv" event={"ID":"4a8a1143-f843-4007-bdc4-095a5047ca69","Type":"ContainerDied","Data":"e01285c7c6b85091b06b425f38de0007437ed6a2dd02b8bd3e8f296ae64d560a"} Nov 25 10:08:22 crc kubenswrapper[4769]: I1125 10:08:22.696581 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e01285c7c6b85091b06b425f38de0007437ed6a2dd02b8bd3e8f296ae64d560a" Nov 25 10:08:22 crc kubenswrapper[4769]: I1125 10:08:22.766901 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-7687d6c66-wfxhv" Nov 25 10:08:22 crc kubenswrapper[4769]: I1125 10:08:22.895318 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a8a1143-f843-4007-bdc4-095a5047ca69-combined-ca-bundle\") pod \"4a8a1143-f843-4007-bdc4-095a5047ca69\" (UID: \"4a8a1143-f843-4007-bdc4-095a5047ca69\") " Nov 25 10:08:22 crc kubenswrapper[4769]: I1125 10:08:22.895720 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a8a1143-f843-4007-bdc4-095a5047ca69-config-data\") pod \"4a8a1143-f843-4007-bdc4-095a5047ca69\" (UID: \"4a8a1143-f843-4007-bdc4-095a5047ca69\") " Nov 25 10:08:22 crc kubenswrapper[4769]: I1125 10:08:22.895766 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4a8a1143-f843-4007-bdc4-095a5047ca69-config-data-custom\") pod \"4a8a1143-f843-4007-bdc4-095a5047ca69\" (UID: \"4a8a1143-f843-4007-bdc4-095a5047ca69\") " Nov 25 10:08:22 crc kubenswrapper[4769]: I1125 10:08:22.895792 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ps94w\" (UniqueName: \"kubernetes.io/projected/4a8a1143-f843-4007-bdc4-095a5047ca69-kube-api-access-ps94w\") pod \"4a8a1143-f843-4007-bdc4-095a5047ca69\" (UID: \"4a8a1143-f843-4007-bdc4-095a5047ca69\") " Nov 25 10:08:22 crc kubenswrapper[4769]: I1125 10:08:22.903117 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4a8a1143-f843-4007-bdc4-095a5047ca69-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "4a8a1143-f843-4007-bdc4-095a5047ca69" (UID: "4a8a1143-f843-4007-bdc4-095a5047ca69"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:08:22 crc kubenswrapper[4769]: I1125 10:08:22.905166 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4a8a1143-f843-4007-bdc4-095a5047ca69-kube-api-access-ps94w" (OuterVolumeSpecName: "kube-api-access-ps94w") pod "4a8a1143-f843-4007-bdc4-095a5047ca69" (UID: "4a8a1143-f843-4007-bdc4-095a5047ca69"). InnerVolumeSpecName "kube-api-access-ps94w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:08:22 crc kubenswrapper[4769]: I1125 10:08:22.930743 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4a8a1143-f843-4007-bdc4-095a5047ca69-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4a8a1143-f843-4007-bdc4-095a5047ca69" (UID: "4a8a1143-f843-4007-bdc4-095a5047ca69"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:08:22 crc kubenswrapper[4769]: I1125 10:08:22.959525 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4a8a1143-f843-4007-bdc4-095a5047ca69-config-data" (OuterVolumeSpecName: "config-data") pod "4a8a1143-f843-4007-bdc4-095a5047ca69" (UID: "4a8a1143-f843-4007-bdc4-095a5047ca69"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:08:22 crc kubenswrapper[4769]: I1125 10:08:22.998461 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a8a1143-f843-4007-bdc4-095a5047ca69-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:22 crc kubenswrapper[4769]: I1125 10:08:22.998510 4769 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4a8a1143-f843-4007-bdc4-095a5047ca69-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:22 crc kubenswrapper[4769]: I1125 10:08:22.998544 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ps94w\" (UniqueName: \"kubernetes.io/projected/4a8a1143-f843-4007-bdc4-095a5047ca69-kube-api-access-ps94w\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:22 crc kubenswrapper[4769]: I1125 10:08:22.998559 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a8a1143-f843-4007-bdc4-095a5047ca69-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:23 crc kubenswrapper[4769]: I1125 10:08:23.722672 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-q6p8q" event={"ID":"69ff4eda-4f38-4778-9e92-ceb7005f8420","Type":"ContainerStarted","Data":"33eff60b4e7d2f17d0fe5a2e17a8a4780d67a8be77cb9f89cad9453ef6b5d7c9"} Nov 25 10:08:23 crc kubenswrapper[4769]: I1125 10:08:23.733177 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-7687d6c66-wfxhv" Nov 25 10:08:23 crc kubenswrapper[4769]: I1125 10:08:23.733373 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5bd31c36-b250-469b-b700-56617d826d79","Type":"ContainerStarted","Data":"3e579c48049c0f0e7ca98e7128a5bdb2b3249d1ee77c4fdef2890b33b1b008e6"} Nov 25 10:08:23 crc kubenswrapper[4769]: I1125 10:08:23.751329 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-q6p8q" podStartSLOduration=2.305567785 podStartE2EDuration="11.751306698s" podCreationTimestamp="2025-11-25 10:08:12 +0000 UTC" firstStartedPulling="2025-11-25 10:08:12.980432685 +0000 UTC m=+1441.565404998" lastFinishedPulling="2025-11-25 10:08:22.426171598 +0000 UTC m=+1451.011143911" observedRunningTime="2025-11-25 10:08:23.744579935 +0000 UTC m=+1452.329552248" watchObservedRunningTime="2025-11-25 10:08:23.751306698 +0000 UTC m=+1452.336279011" Nov 25 10:08:23 crc kubenswrapper[4769]: E1125 10:08:23.794375 4769 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4a8a1143_f843_4007_bdc4_095a5047ca69.slice\": RecentStats: unable to find data in memory cache]" Nov 25 10:08:23 crc kubenswrapper[4769]: I1125 10:08:23.794380 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-7687d6c66-wfxhv"] Nov 25 10:08:23 crc kubenswrapper[4769]: I1125 10:08:23.816054 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-engine-7687d6c66-wfxhv"] Nov 25 10:08:24 crc kubenswrapper[4769]: I1125 10:08:24.262755 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4a8a1143-f843-4007-bdc4-095a5047ca69" path="/var/lib/kubelet/pods/4a8a1143-f843-4007-bdc4-095a5047ca69/volumes" Nov 25 10:08:24 crc kubenswrapper[4769]: I1125 10:08:24.748631 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5bd31c36-b250-469b-b700-56617d826d79","Type":"ContainerStarted","Data":"6fb267bebeeda4111c0eb8d9768069b47226b6ce6d86b53830e2f5d2310a4f58"} Nov 25 10:08:24 crc kubenswrapper[4769]: I1125 10:08:24.749091 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5bd31c36-b250-469b-b700-56617d826d79","Type":"ContainerStarted","Data":"b5b8d8c06083bbd215e2b4f4292494655933053fd68ab294674ab76522deebbe"} Nov 25 10:08:26 crc kubenswrapper[4769]: I1125 10:08:26.777876 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5bd31c36-b250-469b-b700-56617d826d79","Type":"ContainerStarted","Data":"f1198c60590d8d1f401e3318ddb6cc136ac2779af8cfac1481eead81cbdbc938"} Nov 25 10:08:26 crc kubenswrapper[4769]: I1125 10:08:26.778708 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 10:08:26 crc kubenswrapper[4769]: I1125 10:08:26.778283 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5bd31c36-b250-469b-b700-56617d826d79" containerName="sg-core" containerID="cri-o://6fb267bebeeda4111c0eb8d9768069b47226b6ce6d86b53830e2f5d2310a4f58" gracePeriod=30 Nov 25 10:08:26 crc kubenswrapper[4769]: I1125 10:08:26.778181 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5bd31c36-b250-469b-b700-56617d826d79" 
containerName="ceilometer-central-agent" containerID="cri-o://3e579c48049c0f0e7ca98e7128a5bdb2b3249d1ee77c4fdef2890b33b1b008e6" gracePeriod=30 Nov 25 10:08:26 crc kubenswrapper[4769]: I1125 10:08:26.778377 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5bd31c36-b250-469b-b700-56617d826d79" containerName="ceilometer-notification-agent" containerID="cri-o://b5b8d8c06083bbd215e2b4f4292494655933053fd68ab294674ab76522deebbe" gracePeriod=30 Nov 25 10:08:26 crc kubenswrapper[4769]: I1125 10:08:26.778361 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5bd31c36-b250-469b-b700-56617d826d79" containerName="proxy-httpd" containerID="cri-o://f1198c60590d8d1f401e3318ddb6cc136ac2779af8cfac1481eead81cbdbc938" gracePeriod=30 Nov 25 10:08:26 crc kubenswrapper[4769]: I1125 10:08:26.828410 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.664129197 podStartE2EDuration="11.828384041s" podCreationTimestamp="2025-11-25 10:08:15 +0000 UTC" firstStartedPulling="2025-11-25 10:08:16.796857468 +0000 UTC m=+1445.381829781" lastFinishedPulling="2025-11-25 10:08:25.961112302 +0000 UTC m=+1454.546084625" observedRunningTime="2025-11-25 10:08:26.817352719 +0000 UTC m=+1455.402325032" watchObservedRunningTime="2025-11-25 10:08:26.828384041 +0000 UTC m=+1455.413356354" Nov 25 10:08:27 crc kubenswrapper[4769]: I1125 10:08:27.793096 4769 generic.go:334] "Generic (PLEG): container finished" podID="5bd31c36-b250-469b-b700-56617d826d79" containerID="6fb267bebeeda4111c0eb8d9768069b47226b6ce6d86b53830e2f5d2310a4f58" exitCode=2 Nov 25 10:08:27 crc kubenswrapper[4769]: I1125 10:08:27.794415 4769 generic.go:334] "Generic (PLEG): container finished" podID="5bd31c36-b250-469b-b700-56617d826d79" containerID="b5b8d8c06083bbd215e2b4f4292494655933053fd68ab294674ab76522deebbe" exitCode=0 Nov 25 10:08:27 crc kubenswrapper[4769]: I1125 10:08:27.794543 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5bd31c36-b250-469b-b700-56617d826d79","Type":"ContainerDied","Data":"6fb267bebeeda4111c0eb8d9768069b47226b6ce6d86b53830e2f5d2310a4f58"} Nov 25 10:08:27 crc kubenswrapper[4769]: I1125 10:08:27.794655 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5bd31c36-b250-469b-b700-56617d826d79","Type":"ContainerDied","Data":"b5b8d8c06083bbd215e2b4f4292494655933053fd68ab294674ab76522deebbe"} Nov 25 10:08:33 crc kubenswrapper[4769]: I1125 10:08:33.879683 4769 generic.go:334] "Generic (PLEG): container finished" podID="5bd31c36-b250-469b-b700-56617d826d79" containerID="3e579c48049c0f0e7ca98e7128a5bdb2b3249d1ee77c4fdef2890b33b1b008e6" exitCode=0 Nov 25 10:08:33 crc kubenswrapper[4769]: I1125 10:08:33.879768 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5bd31c36-b250-469b-b700-56617d826d79","Type":"ContainerDied","Data":"3e579c48049c0f0e7ca98e7128a5bdb2b3249d1ee77c4fdef2890b33b1b008e6"} Nov 25 10:08:35 crc kubenswrapper[4769]: I1125 10:08:35.906784 4769 generic.go:334] "Generic (PLEG): container finished" podID="69ff4eda-4f38-4778-9e92-ceb7005f8420" containerID="33eff60b4e7d2f17d0fe5a2e17a8a4780d67a8be77cb9f89cad9453ef6b5d7c9" exitCode=0 Nov 25 10:08:35 crc kubenswrapper[4769]: I1125 10:08:35.906856 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-q6p8q" 
event={"ID":"69ff4eda-4f38-4778-9e92-ceb7005f8420","Type":"ContainerDied","Data":"33eff60b4e7d2f17d0fe5a2e17a8a4780d67a8be77cb9f89cad9453ef6b5d7c9"} Nov 25 10:08:37 crc kubenswrapper[4769]: I1125 10:08:37.394649 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-q6p8q" Nov 25 10:08:37 crc kubenswrapper[4769]: I1125 10:08:37.507432 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69ff4eda-4f38-4778-9e92-ceb7005f8420-config-data\") pod \"69ff4eda-4f38-4778-9e92-ceb7005f8420\" (UID: \"69ff4eda-4f38-4778-9e92-ceb7005f8420\") " Nov 25 10:08:37 crc kubenswrapper[4769]: I1125 10:08:37.507497 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/69ff4eda-4f38-4778-9e92-ceb7005f8420-scripts\") pod \"69ff4eda-4f38-4778-9e92-ceb7005f8420\" (UID: \"69ff4eda-4f38-4778-9e92-ceb7005f8420\") " Nov 25 10:08:37 crc kubenswrapper[4769]: I1125 10:08:37.507673 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xc9c4\" (UniqueName: \"kubernetes.io/projected/69ff4eda-4f38-4778-9e92-ceb7005f8420-kube-api-access-xc9c4\") pod \"69ff4eda-4f38-4778-9e92-ceb7005f8420\" (UID: \"69ff4eda-4f38-4778-9e92-ceb7005f8420\") " Nov 25 10:08:37 crc kubenswrapper[4769]: I1125 10:08:37.507711 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69ff4eda-4f38-4778-9e92-ceb7005f8420-combined-ca-bundle\") pod \"69ff4eda-4f38-4778-9e92-ceb7005f8420\" (UID: \"69ff4eda-4f38-4778-9e92-ceb7005f8420\") " Nov 25 10:08:37 crc kubenswrapper[4769]: I1125 10:08:37.516120 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/69ff4eda-4f38-4778-9e92-ceb7005f8420-kube-api-access-xc9c4" (OuterVolumeSpecName: "kube-api-access-xc9c4") pod "69ff4eda-4f38-4778-9e92-ceb7005f8420" (UID: "69ff4eda-4f38-4778-9e92-ceb7005f8420"). InnerVolumeSpecName "kube-api-access-xc9c4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:08:37 crc kubenswrapper[4769]: I1125 10:08:37.528169 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69ff4eda-4f38-4778-9e92-ceb7005f8420-scripts" (OuterVolumeSpecName: "scripts") pod "69ff4eda-4f38-4778-9e92-ceb7005f8420" (UID: "69ff4eda-4f38-4778-9e92-ceb7005f8420"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:08:37 crc kubenswrapper[4769]: I1125 10:08:37.545183 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69ff4eda-4f38-4778-9e92-ceb7005f8420-config-data" (OuterVolumeSpecName: "config-data") pod "69ff4eda-4f38-4778-9e92-ceb7005f8420" (UID: "69ff4eda-4f38-4778-9e92-ceb7005f8420"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:08:37 crc kubenswrapper[4769]: I1125 10:08:37.547007 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69ff4eda-4f38-4778-9e92-ceb7005f8420-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "69ff4eda-4f38-4778-9e92-ceb7005f8420" (UID: "69ff4eda-4f38-4778-9e92-ceb7005f8420"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:08:37 crc kubenswrapper[4769]: I1125 10:08:37.611482 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69ff4eda-4f38-4778-9e92-ceb7005f8420-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:37 crc kubenswrapper[4769]: I1125 10:08:37.611537 4769 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/69ff4eda-4f38-4778-9e92-ceb7005f8420-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:37 crc kubenswrapper[4769]: I1125 10:08:37.611556 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xc9c4\" (UniqueName: \"kubernetes.io/projected/69ff4eda-4f38-4778-9e92-ceb7005f8420-kube-api-access-xc9c4\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:37 crc kubenswrapper[4769]: I1125 10:08:37.611569 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69ff4eda-4f38-4778-9e92-ceb7005f8420-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:37 crc kubenswrapper[4769]: I1125 10:08:37.933889 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-q6p8q" event={"ID":"69ff4eda-4f38-4778-9e92-ceb7005f8420","Type":"ContainerDied","Data":"03196d26015af06428cfc4391c6e662950991068564d1107905a4216de0962df"} Nov 25 10:08:37 crc kubenswrapper[4769]: I1125 10:08:37.934220 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="03196d26015af06428cfc4391c6e662950991068564d1107905a4216de0962df" Nov 25 10:08:37 crc kubenswrapper[4769]: I1125 10:08:37.934111 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-q6p8q" Nov 25 10:08:38 crc kubenswrapper[4769]: I1125 10:08:38.054248 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 25 10:08:38 crc kubenswrapper[4769]: E1125 10:08:38.055238 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69ff4eda-4f38-4778-9e92-ceb7005f8420" containerName="nova-cell0-conductor-db-sync" Nov 25 10:08:38 crc kubenswrapper[4769]: I1125 10:08:38.055259 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="69ff4eda-4f38-4778-9e92-ceb7005f8420" containerName="nova-cell0-conductor-db-sync" Nov 25 10:08:38 crc kubenswrapper[4769]: E1125 10:08:38.055274 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a8a1143-f843-4007-bdc4-095a5047ca69" containerName="heat-engine" Nov 25 10:08:38 crc kubenswrapper[4769]: I1125 10:08:38.055283 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a8a1143-f843-4007-bdc4-095a5047ca69" containerName="heat-engine" Nov 25 10:08:38 crc kubenswrapper[4769]: I1125 10:08:38.055477 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="4a8a1143-f843-4007-bdc4-095a5047ca69" containerName="heat-engine" Nov 25 10:08:38 crc kubenswrapper[4769]: I1125 10:08:38.055502 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="69ff4eda-4f38-4778-9e92-ceb7005f8420" containerName="nova-cell0-conductor-db-sync" Nov 25 10:08:38 crc kubenswrapper[4769]: I1125 10:08:38.056290 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 25 10:08:38 crc kubenswrapper[4769]: I1125 10:08:38.059338 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-4rmv5" Nov 25 10:08:38 crc kubenswrapper[4769]: I1125 10:08:38.060013 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 25 10:08:38 crc kubenswrapper[4769]: I1125 10:08:38.067984 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 25 10:08:38 crc kubenswrapper[4769]: I1125 10:08:38.225389 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/85732feb-40a3-4155-aa20-fd8d6207b357-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"85732feb-40a3-4155-aa20-fd8d6207b357\") " pod="openstack/nova-cell0-conductor-0" Nov 25 10:08:38 crc kubenswrapper[4769]: I1125 10:08:38.225685 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/85732feb-40a3-4155-aa20-fd8d6207b357-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"85732feb-40a3-4155-aa20-fd8d6207b357\") " pod="openstack/nova-cell0-conductor-0" Nov 25 10:08:38 crc kubenswrapper[4769]: I1125 10:08:38.226178 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-drpbr\" (UniqueName: \"kubernetes.io/projected/85732feb-40a3-4155-aa20-fd8d6207b357-kube-api-access-drpbr\") pod \"nova-cell0-conductor-0\" (UID: \"85732feb-40a3-4155-aa20-fd8d6207b357\") " pod="openstack/nova-cell0-conductor-0" Nov 25 10:08:38 crc kubenswrapper[4769]: I1125 10:08:38.330718 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/85732feb-40a3-4155-aa20-fd8d6207b357-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"85732feb-40a3-4155-aa20-fd8d6207b357\") " pod="openstack/nova-cell0-conductor-0" Nov 25 10:08:38 crc kubenswrapper[4769]: I1125 10:08:38.330926 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/85732feb-40a3-4155-aa20-fd8d6207b357-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"85732feb-40a3-4155-aa20-fd8d6207b357\") " pod="openstack/nova-cell0-conductor-0" Nov 25 10:08:38 crc kubenswrapper[4769]: I1125 10:08:38.331259 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-drpbr\" (UniqueName: \"kubernetes.io/projected/85732feb-40a3-4155-aa20-fd8d6207b357-kube-api-access-drpbr\") pod \"nova-cell0-conductor-0\" (UID: \"85732feb-40a3-4155-aa20-fd8d6207b357\") " pod="openstack/nova-cell0-conductor-0" Nov 25 10:08:38 crc kubenswrapper[4769]: I1125 10:08:38.342476 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/85732feb-40a3-4155-aa20-fd8d6207b357-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"85732feb-40a3-4155-aa20-fd8d6207b357\") " pod="openstack/nova-cell0-conductor-0" Nov 25 10:08:38 crc kubenswrapper[4769]: I1125 10:08:38.346018 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/85732feb-40a3-4155-aa20-fd8d6207b357-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" 
(UID: \"85732feb-40a3-4155-aa20-fd8d6207b357\") " pod="openstack/nova-cell0-conductor-0" Nov 25 10:08:38 crc kubenswrapper[4769]: I1125 10:08:38.360802 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-drpbr\" (UniqueName: \"kubernetes.io/projected/85732feb-40a3-4155-aa20-fd8d6207b357-kube-api-access-drpbr\") pod \"nova-cell0-conductor-0\" (UID: \"85732feb-40a3-4155-aa20-fd8d6207b357\") " pod="openstack/nova-cell0-conductor-0" Nov 25 10:08:38 crc kubenswrapper[4769]: I1125 10:08:38.380693 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 25 10:08:38 crc kubenswrapper[4769]: I1125 10:08:38.922647 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 25 10:08:38 crc kubenswrapper[4769]: W1125 10:08:38.927461 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod85732feb_40a3_4155_aa20_fd8d6207b357.slice/crio-4e20bf4da033a872b9487962d3750a13facdfada7e29df77cfd842c4e172adcb WatchSource:0}: Error finding container 4e20bf4da033a872b9487962d3750a13facdfada7e29df77cfd842c4e172adcb: Status 404 returned error can't find the container with id 4e20bf4da033a872b9487962d3750a13facdfada7e29df77cfd842c4e172adcb Nov 25 10:08:38 crc kubenswrapper[4769]: I1125 10:08:38.969019 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"85732feb-40a3-4155-aa20-fd8d6207b357","Type":"ContainerStarted","Data":"4e20bf4da033a872b9487962d3750a13facdfada7e29df77cfd842c4e172adcb"} Nov 25 10:08:39 crc kubenswrapper[4769]: I1125 10:08:39.984016 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"85732feb-40a3-4155-aa20-fd8d6207b357","Type":"ContainerStarted","Data":"111b01b4ef16a0b8423fdd08690a6c70fcb53af424c99f1fd4b73da4cf481eeb"} Nov 25 10:08:39 crc kubenswrapper[4769]: I1125 10:08:39.984728 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 25 10:08:40 crc kubenswrapper[4769]: I1125 10:08:40.013907 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.013890951 podStartE2EDuration="2.013890951s" podCreationTimestamp="2025-11-25 10:08:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:08:40.011011807 +0000 UTC m=+1468.595984150" watchObservedRunningTime="2025-11-25 10:08:40.013890951 +0000 UTC m=+1468.598863264" Nov 25 10:08:40 crc kubenswrapper[4769]: I1125 10:08:40.389260 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-create-scf89"] Nov 25 10:08:40 crc kubenswrapper[4769]: I1125 10:08:40.391038 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-scf89" Nov 25 10:08:40 crc kubenswrapper[4769]: I1125 10:08:40.424730 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-scf89"] Nov 25 10:08:40 crc kubenswrapper[4769]: I1125 10:08:40.465310 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-d76d-account-create-cqmjg"] Nov 25 10:08:40 crc kubenswrapper[4769]: I1125 10:08:40.468245 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-d76d-account-create-cqmjg" Nov 25 10:08:40 crc kubenswrapper[4769]: I1125 10:08:40.471887 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-db-secret" Nov 25 10:08:40 crc kubenswrapper[4769]: I1125 10:08:40.483295 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w6f7z\" (UniqueName: \"kubernetes.io/projected/4b1e6bf8-e126-48bf-9692-e38e6a3b9f2f-kube-api-access-w6f7z\") pod \"aodh-db-create-scf89\" (UID: \"4b1e6bf8-e126-48bf-9692-e38e6a3b9f2f\") " pod="openstack/aodh-db-create-scf89" Nov 25 10:08:40 crc kubenswrapper[4769]: I1125 10:08:40.483533 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b1e6bf8-e126-48bf-9692-e38e6a3b9f2f-operator-scripts\") pod \"aodh-db-create-scf89\" (UID: \"4b1e6bf8-e126-48bf-9692-e38e6a3b9f2f\") " pod="openstack/aodh-db-create-scf89" Nov 25 10:08:40 crc kubenswrapper[4769]: I1125 10:08:40.486985 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-d76d-account-create-cqmjg"] Nov 25 10:08:40 crc kubenswrapper[4769]: I1125 10:08:40.586385 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9817ed57-5bda-46f1-ad02-e2ac6e770543-operator-scripts\") pod \"aodh-d76d-account-create-cqmjg\" (UID: \"9817ed57-5bda-46f1-ad02-e2ac6e770543\") " pod="openstack/aodh-d76d-account-create-cqmjg" Nov 25 10:08:40 crc kubenswrapper[4769]: I1125 10:08:40.586472 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zghht\" (UniqueName: \"kubernetes.io/projected/9817ed57-5bda-46f1-ad02-e2ac6e770543-kube-api-access-zghht\") pod \"aodh-d76d-account-create-cqmjg\" (UID: \"9817ed57-5bda-46f1-ad02-e2ac6e770543\") " pod="openstack/aodh-d76d-account-create-cqmjg" Nov 25 10:08:40 crc kubenswrapper[4769]: I1125 10:08:40.586512 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b1e6bf8-e126-48bf-9692-e38e6a3b9f2f-operator-scripts\") pod \"aodh-db-create-scf89\" (UID: \"4b1e6bf8-e126-48bf-9692-e38e6a3b9f2f\") " pod="openstack/aodh-db-create-scf89" Nov 25 10:08:40 crc kubenswrapper[4769]: I1125 10:08:40.586652 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w6f7z\" (UniqueName: \"kubernetes.io/projected/4b1e6bf8-e126-48bf-9692-e38e6a3b9f2f-kube-api-access-w6f7z\") pod \"aodh-db-create-scf89\" (UID: \"4b1e6bf8-e126-48bf-9692-e38e6a3b9f2f\") " pod="openstack/aodh-db-create-scf89" Nov 25 10:08:40 crc kubenswrapper[4769]: I1125 10:08:40.587817 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b1e6bf8-e126-48bf-9692-e38e6a3b9f2f-operator-scripts\") pod \"aodh-db-create-scf89\" (UID: \"4b1e6bf8-e126-48bf-9692-e38e6a3b9f2f\") " pod="openstack/aodh-db-create-scf89" Nov 25 10:08:40 crc kubenswrapper[4769]: I1125 10:08:40.608663 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w6f7z\" (UniqueName: \"kubernetes.io/projected/4b1e6bf8-e126-48bf-9692-e38e6a3b9f2f-kube-api-access-w6f7z\") pod \"aodh-db-create-scf89\" (UID: \"4b1e6bf8-e126-48bf-9692-e38e6a3b9f2f\") " pod="openstack/aodh-db-create-scf89" 
Nov 25 10:08:40 crc kubenswrapper[4769]: I1125 10:08:40.689311 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zghht\" (UniqueName: \"kubernetes.io/projected/9817ed57-5bda-46f1-ad02-e2ac6e770543-kube-api-access-zghht\") pod \"aodh-d76d-account-create-cqmjg\" (UID: \"9817ed57-5bda-46f1-ad02-e2ac6e770543\") " pod="openstack/aodh-d76d-account-create-cqmjg" Nov 25 10:08:40 crc kubenswrapper[4769]: I1125 10:08:40.691461 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9817ed57-5bda-46f1-ad02-e2ac6e770543-operator-scripts\") pod \"aodh-d76d-account-create-cqmjg\" (UID: \"9817ed57-5bda-46f1-ad02-e2ac6e770543\") " pod="openstack/aodh-d76d-account-create-cqmjg" Nov 25 10:08:40 crc kubenswrapper[4769]: I1125 10:08:40.692245 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9817ed57-5bda-46f1-ad02-e2ac6e770543-operator-scripts\") pod \"aodh-d76d-account-create-cqmjg\" (UID: \"9817ed57-5bda-46f1-ad02-e2ac6e770543\") " pod="openstack/aodh-d76d-account-create-cqmjg" Nov 25 10:08:40 crc kubenswrapper[4769]: I1125 10:08:40.708287 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zghht\" (UniqueName: \"kubernetes.io/projected/9817ed57-5bda-46f1-ad02-e2ac6e770543-kube-api-access-zghht\") pod \"aodh-d76d-account-create-cqmjg\" (UID: \"9817ed57-5bda-46f1-ad02-e2ac6e770543\") " pod="openstack/aodh-d76d-account-create-cqmjg" Nov 25 10:08:40 crc kubenswrapper[4769]: I1125 10:08:40.726350 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-scf89" Nov 25 10:08:40 crc kubenswrapper[4769]: I1125 10:08:40.787435 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-d76d-account-create-cqmjg" Nov 25 10:08:41 crc kubenswrapper[4769]: I1125 10:08:41.279260 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-scf89"] Nov 25 10:08:41 crc kubenswrapper[4769]: W1125 10:08:41.281446 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4b1e6bf8_e126_48bf_9692_e38e6a3b9f2f.slice/crio-219ed76a2e444b4723e7ee458a8fd1378373b6037250f3db9425b51c3257a239 WatchSource:0}: Error finding container 219ed76a2e444b4723e7ee458a8fd1378373b6037250f3db9425b51c3257a239: Status 404 returned error can't find the container with id 219ed76a2e444b4723e7ee458a8fd1378373b6037250f3db9425b51c3257a239 Nov 25 10:08:41 crc kubenswrapper[4769]: I1125 10:08:41.373113 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-pcp4d"] Nov 25 10:08:41 crc kubenswrapper[4769]: I1125 10:08:41.378981 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-pcp4d" Nov 25 10:08:41 crc kubenswrapper[4769]: I1125 10:08:41.384273 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pcp4d"] Nov 25 10:08:41 crc kubenswrapper[4769]: I1125 10:08:41.396701 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-d76d-account-create-cqmjg"] Nov 25 10:08:41 crc kubenswrapper[4769]: I1125 10:08:41.515703 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7937ea4d-23ba-4f0a-af90-1257f8592d1e-catalog-content\") pod \"community-operators-pcp4d\" (UID: \"7937ea4d-23ba-4f0a-af90-1257f8592d1e\") " pod="openshift-marketplace/community-operators-pcp4d" Nov 25 10:08:41 crc kubenswrapper[4769]: I1125 10:08:41.515806 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7937ea4d-23ba-4f0a-af90-1257f8592d1e-utilities\") pod \"community-operators-pcp4d\" (UID: \"7937ea4d-23ba-4f0a-af90-1257f8592d1e\") " pod="openshift-marketplace/community-operators-pcp4d" Nov 25 10:08:41 crc kubenswrapper[4769]: I1125 10:08:41.515843 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kpsd8\" (UniqueName: \"kubernetes.io/projected/7937ea4d-23ba-4f0a-af90-1257f8592d1e-kube-api-access-kpsd8\") pod \"community-operators-pcp4d\" (UID: \"7937ea4d-23ba-4f0a-af90-1257f8592d1e\") " pod="openshift-marketplace/community-operators-pcp4d" Nov 25 10:08:41 crc kubenswrapper[4769]: I1125 10:08:41.617858 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7937ea4d-23ba-4f0a-af90-1257f8592d1e-utilities\") pod \"community-operators-pcp4d\" (UID: \"7937ea4d-23ba-4f0a-af90-1257f8592d1e\") " pod="openshift-marketplace/community-operators-pcp4d" Nov 25 10:08:41 crc kubenswrapper[4769]: I1125 10:08:41.618311 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kpsd8\" (UniqueName: \"kubernetes.io/projected/7937ea4d-23ba-4f0a-af90-1257f8592d1e-kube-api-access-kpsd8\") pod \"community-operators-pcp4d\" (UID: \"7937ea4d-23ba-4f0a-af90-1257f8592d1e\") " pod="openshift-marketplace/community-operators-pcp4d" Nov 25 10:08:41 crc kubenswrapper[4769]: I1125 10:08:41.618421 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7937ea4d-23ba-4f0a-af90-1257f8592d1e-utilities\") pod \"community-operators-pcp4d\" (UID: \"7937ea4d-23ba-4f0a-af90-1257f8592d1e\") " pod="openshift-marketplace/community-operators-pcp4d" Nov 25 10:08:41 crc kubenswrapper[4769]: I1125 10:08:41.618904 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7937ea4d-23ba-4f0a-af90-1257f8592d1e-catalog-content\") pod \"community-operators-pcp4d\" (UID: \"7937ea4d-23ba-4f0a-af90-1257f8592d1e\") " pod="openshift-marketplace/community-operators-pcp4d" Nov 25 10:08:41 crc kubenswrapper[4769]: I1125 10:08:41.619200 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7937ea4d-23ba-4f0a-af90-1257f8592d1e-catalog-content\") pod \"community-operators-pcp4d\" (UID: 
\"7937ea4d-23ba-4f0a-af90-1257f8592d1e\") " pod="openshift-marketplace/community-operators-pcp4d" Nov 25 10:08:41 crc kubenswrapper[4769]: I1125 10:08:41.641853 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kpsd8\" (UniqueName: \"kubernetes.io/projected/7937ea4d-23ba-4f0a-af90-1257f8592d1e-kube-api-access-kpsd8\") pod \"community-operators-pcp4d\" (UID: \"7937ea4d-23ba-4f0a-af90-1257f8592d1e\") " pod="openshift-marketplace/community-operators-pcp4d" Nov 25 10:08:41 crc kubenswrapper[4769]: I1125 10:08:41.727810 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pcp4d" Nov 25 10:08:42 crc kubenswrapper[4769]: I1125 10:08:42.025187 4769 generic.go:334] "Generic (PLEG): container finished" podID="4b1e6bf8-e126-48bf-9692-e38e6a3b9f2f" containerID="9fc04321d90f797532ec952fd5c985b529d666039ff5b01f6c7dcc7faa6b7748" exitCode=0 Nov 25 10:08:42 crc kubenswrapper[4769]: I1125 10:08:42.026092 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-scf89" event={"ID":"4b1e6bf8-e126-48bf-9692-e38e6a3b9f2f","Type":"ContainerDied","Data":"9fc04321d90f797532ec952fd5c985b529d666039ff5b01f6c7dcc7faa6b7748"} Nov 25 10:08:42 crc kubenswrapper[4769]: I1125 10:08:42.026171 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-scf89" event={"ID":"4b1e6bf8-e126-48bf-9692-e38e6a3b9f2f","Type":"ContainerStarted","Data":"219ed76a2e444b4723e7ee458a8fd1378373b6037250f3db9425b51c3257a239"} Nov 25 10:08:42 crc kubenswrapper[4769]: I1125 10:08:42.029875 4769 generic.go:334] "Generic (PLEG): container finished" podID="9817ed57-5bda-46f1-ad02-e2ac6e770543" containerID="6efa13f3c1f1ef376f8c9106ef8b49ede65c553462019c2a0107ae516c4f2a50" exitCode=0 Nov 25 10:08:42 crc kubenswrapper[4769]: I1125 10:08:42.029932 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-d76d-account-create-cqmjg" event={"ID":"9817ed57-5bda-46f1-ad02-e2ac6e770543","Type":"ContainerDied","Data":"6efa13f3c1f1ef376f8c9106ef8b49ede65c553462019c2a0107ae516c4f2a50"} Nov 25 10:08:42 crc kubenswrapper[4769]: I1125 10:08:42.029982 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-d76d-account-create-cqmjg" event={"ID":"9817ed57-5bda-46f1-ad02-e2ac6e770543","Type":"ContainerStarted","Data":"9d7a4a6ec9534bf101a05c952288028f3ccb97747b20cf55be0a252e319db2dd"} Nov 25 10:08:42 crc kubenswrapper[4769]: I1125 10:08:42.220560 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pcp4d"] Nov 25 10:08:43 crc kubenswrapper[4769]: I1125 10:08:43.042450 4769 generic.go:334] "Generic (PLEG): container finished" podID="7937ea4d-23ba-4f0a-af90-1257f8592d1e" containerID="0bb6171a3bdc25ad1d6b7441d741418c4d6076094607cdccd2715f2368cabb44" exitCode=0 Nov 25 10:08:43 crc kubenswrapper[4769]: I1125 10:08:43.042514 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pcp4d" event={"ID":"7937ea4d-23ba-4f0a-af90-1257f8592d1e","Type":"ContainerDied","Data":"0bb6171a3bdc25ad1d6b7441d741418c4d6076094607cdccd2715f2368cabb44"} Nov 25 10:08:43 crc kubenswrapper[4769]: I1125 10:08:43.042897 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pcp4d" event={"ID":"7937ea4d-23ba-4f0a-af90-1257f8592d1e","Type":"ContainerStarted","Data":"26748e253416a88d899e5e60be4183719bf0584426482b9063faa81e2831dd84"} Nov 25 10:08:43 crc 
kubenswrapper[4769]: I1125 10:08:43.544118 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-d76d-account-create-cqmjg" Nov 25 10:08:43 crc kubenswrapper[4769]: I1125 10:08:43.551386 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-scf89" Nov 25 10:08:43 crc kubenswrapper[4769]: I1125 10:08:43.669291 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9817ed57-5bda-46f1-ad02-e2ac6e770543-operator-scripts\") pod \"9817ed57-5bda-46f1-ad02-e2ac6e770543\" (UID: \"9817ed57-5bda-46f1-ad02-e2ac6e770543\") " Nov 25 10:08:43 crc kubenswrapper[4769]: I1125 10:08:43.669355 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w6f7z\" (UniqueName: \"kubernetes.io/projected/4b1e6bf8-e126-48bf-9692-e38e6a3b9f2f-kube-api-access-w6f7z\") pod \"4b1e6bf8-e126-48bf-9692-e38e6a3b9f2f\" (UID: \"4b1e6bf8-e126-48bf-9692-e38e6a3b9f2f\") " Nov 25 10:08:43 crc kubenswrapper[4769]: I1125 10:08:43.669672 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b1e6bf8-e126-48bf-9692-e38e6a3b9f2f-operator-scripts\") pod \"4b1e6bf8-e126-48bf-9692-e38e6a3b9f2f\" (UID: \"4b1e6bf8-e126-48bf-9692-e38e6a3b9f2f\") " Nov 25 10:08:43 crc kubenswrapper[4769]: I1125 10:08:43.669797 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zghht\" (UniqueName: \"kubernetes.io/projected/9817ed57-5bda-46f1-ad02-e2ac6e770543-kube-api-access-zghht\") pod \"9817ed57-5bda-46f1-ad02-e2ac6e770543\" (UID: \"9817ed57-5bda-46f1-ad02-e2ac6e770543\") " Nov 25 10:08:43 crc kubenswrapper[4769]: I1125 10:08:43.670352 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9817ed57-5bda-46f1-ad02-e2ac6e770543-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9817ed57-5bda-46f1-ad02-e2ac6e770543" (UID: "9817ed57-5bda-46f1-ad02-e2ac6e770543"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:08:43 crc kubenswrapper[4769]: I1125 10:08:43.670372 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4b1e6bf8-e126-48bf-9692-e38e6a3b9f2f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4b1e6bf8-e126-48bf-9692-e38e6a3b9f2f" (UID: "4b1e6bf8-e126-48bf-9692-e38e6a3b9f2f"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:08:43 crc kubenswrapper[4769]: I1125 10:08:43.671531 4769 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b1e6bf8-e126-48bf-9692-e38e6a3b9f2f-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:43 crc kubenswrapper[4769]: I1125 10:08:43.671552 4769 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9817ed57-5bda-46f1-ad02-e2ac6e770543-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:43 crc kubenswrapper[4769]: I1125 10:08:43.676225 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9817ed57-5bda-46f1-ad02-e2ac6e770543-kube-api-access-zghht" (OuterVolumeSpecName: "kube-api-access-zghht") pod "9817ed57-5bda-46f1-ad02-e2ac6e770543" (UID: "9817ed57-5bda-46f1-ad02-e2ac6e770543"). InnerVolumeSpecName "kube-api-access-zghht". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:08:43 crc kubenswrapper[4769]: I1125 10:08:43.679131 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b1e6bf8-e126-48bf-9692-e38e6a3b9f2f-kube-api-access-w6f7z" (OuterVolumeSpecName: "kube-api-access-w6f7z") pod "4b1e6bf8-e126-48bf-9692-e38e6a3b9f2f" (UID: "4b1e6bf8-e126-48bf-9692-e38e6a3b9f2f"). InnerVolumeSpecName "kube-api-access-w6f7z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:08:43 crc kubenswrapper[4769]: I1125 10:08:43.774688 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zghht\" (UniqueName: \"kubernetes.io/projected/9817ed57-5bda-46f1-ad02-e2ac6e770543-kube-api-access-zghht\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:43 crc kubenswrapper[4769]: I1125 10:08:43.775098 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w6f7z\" (UniqueName: \"kubernetes.io/projected/4b1e6bf8-e126-48bf-9692-e38e6a3b9f2f-kube-api-access-w6f7z\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:44 crc kubenswrapper[4769]: I1125 10:08:44.056925 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-d76d-account-create-cqmjg" event={"ID":"9817ed57-5bda-46f1-ad02-e2ac6e770543","Type":"ContainerDied","Data":"9d7a4a6ec9534bf101a05c952288028f3ccb97747b20cf55be0a252e319db2dd"} Nov 25 10:08:44 crc kubenswrapper[4769]: I1125 10:08:44.056967 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-d76d-account-create-cqmjg" Nov 25 10:08:44 crc kubenswrapper[4769]: I1125 10:08:44.056985 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9d7a4a6ec9534bf101a05c952288028f3ccb97747b20cf55be0a252e319db2dd" Nov 25 10:08:44 crc kubenswrapper[4769]: I1125 10:08:44.062682 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-scf89" event={"ID":"4b1e6bf8-e126-48bf-9692-e38e6a3b9f2f","Type":"ContainerDied","Data":"219ed76a2e444b4723e7ee458a8fd1378373b6037250f3db9425b51c3257a239"} Nov 25 10:08:44 crc kubenswrapper[4769]: I1125 10:08:44.062707 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="219ed76a2e444b4723e7ee458a8fd1378373b6037250f3db9425b51c3257a239" Nov 25 10:08:44 crc kubenswrapper[4769]: I1125 10:08:44.062762 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-create-scf89" Nov 25 10:08:44 crc kubenswrapper[4769]: E1125 10:08:44.180647 4769 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9817ed57_5bda_46f1_ad02_e2ac6e770543.slice/crio-9d7a4a6ec9534bf101a05c952288028f3ccb97747b20cf55be0a252e319db2dd\": RecentStats: unable to find data in memory cache]" Nov 25 10:08:45 crc kubenswrapper[4769]: I1125 10:08:45.107793 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pcp4d" event={"ID":"7937ea4d-23ba-4f0a-af90-1257f8592d1e","Type":"ContainerStarted","Data":"5e547c7188a2ff86bc04a6c7ba2fd7f0d41e3889398b6c5765ab37e6b3ae12d5"} Nov 25 10:08:45 crc kubenswrapper[4769]: I1125 10:08:45.883367 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-sync-gv6vg"] Nov 25 10:08:45 crc kubenswrapper[4769]: E1125 10:08:45.883850 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9817ed57-5bda-46f1-ad02-e2ac6e770543" containerName="mariadb-account-create" Nov 25 10:08:45 crc kubenswrapper[4769]: I1125 10:08:45.883865 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="9817ed57-5bda-46f1-ad02-e2ac6e770543" containerName="mariadb-account-create" Nov 25 10:08:45 crc kubenswrapper[4769]: E1125 10:08:45.883883 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b1e6bf8-e126-48bf-9692-e38e6a3b9f2f" containerName="mariadb-database-create" Nov 25 10:08:45 crc kubenswrapper[4769]: I1125 10:08:45.883889 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b1e6bf8-e126-48bf-9692-e38e6a3b9f2f" containerName="mariadb-database-create" Nov 25 10:08:45 crc kubenswrapper[4769]: I1125 10:08:45.884164 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="9817ed57-5bda-46f1-ad02-e2ac6e770543" containerName="mariadb-account-create" Nov 25 10:08:45 crc kubenswrapper[4769]: I1125 10:08:45.884202 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b1e6bf8-e126-48bf-9692-e38e6a3b9f2f" containerName="mariadb-database-create" Nov 25 10:08:45 crc kubenswrapper[4769]: I1125 10:08:45.886649 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-gv6vg" Nov 25 10:08:45 crc kubenswrapper[4769]: I1125 10:08:45.899813 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Nov 25 10:08:45 crc kubenswrapper[4769]: I1125 10:08:45.905717 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Nov 25 10:08:45 crc kubenswrapper[4769]: I1125 10:08:45.906455 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-5nnz9" Nov 25 10:08:45 crc kubenswrapper[4769]: I1125 10:08:45.906625 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 25 10:08:45 crc kubenswrapper[4769]: I1125 10:08:45.920153 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-gv6vg"] Nov 25 10:08:46 crc kubenswrapper[4769]: I1125 10:08:46.037572 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cb6debff-79c0-45cc-915d-8d40aa9a4b78-scripts\") pod \"aodh-db-sync-gv6vg\" (UID: \"cb6debff-79c0-45cc-915d-8d40aa9a4b78\") " pod="openstack/aodh-db-sync-gv6vg" Nov 25 10:08:46 crc kubenswrapper[4769]: I1125 10:08:46.037664 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb6debff-79c0-45cc-915d-8d40aa9a4b78-combined-ca-bundle\") pod \"aodh-db-sync-gv6vg\" (UID: \"cb6debff-79c0-45cc-915d-8d40aa9a4b78\") " pod="openstack/aodh-db-sync-gv6vg" Nov 25 10:08:46 crc kubenswrapper[4769]: I1125 10:08:46.037681 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6t84j\" (UniqueName: \"kubernetes.io/projected/cb6debff-79c0-45cc-915d-8d40aa9a4b78-kube-api-access-6t84j\") pod \"aodh-db-sync-gv6vg\" (UID: \"cb6debff-79c0-45cc-915d-8d40aa9a4b78\") " pod="openstack/aodh-db-sync-gv6vg" Nov 25 10:08:46 crc kubenswrapper[4769]: I1125 10:08:46.037731 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb6debff-79c0-45cc-915d-8d40aa9a4b78-config-data\") pod \"aodh-db-sync-gv6vg\" (UID: \"cb6debff-79c0-45cc-915d-8d40aa9a4b78\") " pod="openstack/aodh-db-sync-gv6vg" Nov 25 10:08:46 crc kubenswrapper[4769]: I1125 10:08:46.137810 4769 generic.go:334] "Generic (PLEG): container finished" podID="7937ea4d-23ba-4f0a-af90-1257f8592d1e" containerID="5e547c7188a2ff86bc04a6c7ba2fd7f0d41e3889398b6c5765ab37e6b3ae12d5" exitCode=0 Nov 25 10:08:46 crc kubenswrapper[4769]: I1125 10:08:46.137874 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pcp4d" event={"ID":"7937ea4d-23ba-4f0a-af90-1257f8592d1e","Type":"ContainerDied","Data":"5e547c7188a2ff86bc04a6c7ba2fd7f0d41e3889398b6c5765ab37e6b3ae12d5"} Nov 25 10:08:46 crc kubenswrapper[4769]: I1125 10:08:46.140911 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6t84j\" (UniqueName: \"kubernetes.io/projected/cb6debff-79c0-45cc-915d-8d40aa9a4b78-kube-api-access-6t84j\") pod \"aodh-db-sync-gv6vg\" (UID: \"cb6debff-79c0-45cc-915d-8d40aa9a4b78\") " pod="openstack/aodh-db-sync-gv6vg" Nov 25 10:08:46 crc kubenswrapper[4769]: I1125 10:08:46.140974 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/cb6debff-79c0-45cc-915d-8d40aa9a4b78-combined-ca-bundle\") pod \"aodh-db-sync-gv6vg\" (UID: \"cb6debff-79c0-45cc-915d-8d40aa9a4b78\") " pod="openstack/aodh-db-sync-gv6vg" Nov 25 10:08:46 crc kubenswrapper[4769]: I1125 10:08:46.141076 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb6debff-79c0-45cc-915d-8d40aa9a4b78-config-data\") pod \"aodh-db-sync-gv6vg\" (UID: \"cb6debff-79c0-45cc-915d-8d40aa9a4b78\") " pod="openstack/aodh-db-sync-gv6vg" Nov 25 10:08:46 crc kubenswrapper[4769]: I1125 10:08:46.141250 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cb6debff-79c0-45cc-915d-8d40aa9a4b78-scripts\") pod \"aodh-db-sync-gv6vg\" (UID: \"cb6debff-79c0-45cc-915d-8d40aa9a4b78\") " pod="openstack/aodh-db-sync-gv6vg" Nov 25 10:08:46 crc kubenswrapper[4769]: I1125 10:08:46.149493 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cb6debff-79c0-45cc-915d-8d40aa9a4b78-scripts\") pod \"aodh-db-sync-gv6vg\" (UID: \"cb6debff-79c0-45cc-915d-8d40aa9a4b78\") " pod="openstack/aodh-db-sync-gv6vg" Nov 25 10:08:46 crc kubenswrapper[4769]: I1125 10:08:46.158019 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb6debff-79c0-45cc-915d-8d40aa9a4b78-config-data\") pod \"aodh-db-sync-gv6vg\" (UID: \"cb6debff-79c0-45cc-915d-8d40aa9a4b78\") " pod="openstack/aodh-db-sync-gv6vg" Nov 25 10:08:46 crc kubenswrapper[4769]: I1125 10:08:46.184340 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb6debff-79c0-45cc-915d-8d40aa9a4b78-combined-ca-bundle\") pod \"aodh-db-sync-gv6vg\" (UID: \"cb6debff-79c0-45cc-915d-8d40aa9a4b78\") " pod="openstack/aodh-db-sync-gv6vg" Nov 25 10:08:46 crc kubenswrapper[4769]: I1125 10:08:46.184640 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6t84j\" (UniqueName: \"kubernetes.io/projected/cb6debff-79c0-45cc-915d-8d40aa9a4b78-kube-api-access-6t84j\") pod \"aodh-db-sync-gv6vg\" (UID: \"cb6debff-79c0-45cc-915d-8d40aa9a4b78\") " pod="openstack/aodh-db-sync-gv6vg" Nov 25 10:08:46 crc kubenswrapper[4769]: I1125 10:08:46.210673 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-gv6vg" Nov 25 10:08:46 crc kubenswrapper[4769]: I1125 10:08:46.295660 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="5bd31c36-b250-469b-b700-56617d826d79" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 25 10:08:46 crc kubenswrapper[4769]: I1125 10:08:46.792043 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-gv6vg"] Nov 25 10:08:47 crc kubenswrapper[4769]: I1125 10:08:47.152570 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pcp4d" event={"ID":"7937ea4d-23ba-4f0a-af90-1257f8592d1e","Type":"ContainerStarted","Data":"ffa25a35b74ad97be14908cef48e22c3ab33399855d6036e64d903a2ea91d835"} Nov 25 10:08:47 crc kubenswrapper[4769]: I1125 10:08:47.154086 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-gv6vg" event={"ID":"cb6debff-79c0-45cc-915d-8d40aa9a4b78","Type":"ContainerStarted","Data":"337c97cac2db03c881ba0b1e21bd2e69f0fecfaed9e3ad8bfe5f221f63038516"} Nov 25 10:08:47 crc kubenswrapper[4769]: I1125 10:08:47.178207 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-pcp4d" podStartSLOduration=2.586293507 podStartE2EDuration="6.178184072s" podCreationTimestamp="2025-11-25 10:08:41 +0000 UTC" firstStartedPulling="2025-11-25 10:08:43.045873569 +0000 UTC m=+1471.630845882" lastFinishedPulling="2025-11-25 10:08:46.637764134 +0000 UTC m=+1475.222736447" observedRunningTime="2025-11-25 10:08:47.174144028 +0000 UTC m=+1475.759116361" watchObservedRunningTime="2025-11-25 10:08:47.178184072 +0000 UTC m=+1475.763156385" Nov 25 10:08:48 crc kubenswrapper[4769]: I1125 10:08:48.432673 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.195154 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-xsqz9"] Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.197796 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-xsqz9" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.203428 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.203802 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.217697 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-xsqz9"] Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.365639 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8a7752ba-7e13-4f6c-af46-5df66ad668e3-scripts\") pod \"nova-cell0-cell-mapping-xsqz9\" (UID: \"8a7752ba-7e13-4f6c-af46-5df66ad668e3\") " pod="openstack/nova-cell0-cell-mapping-xsqz9" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.366018 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a7752ba-7e13-4f6c-af46-5df66ad668e3-config-data\") pod \"nova-cell0-cell-mapping-xsqz9\" (UID: \"8a7752ba-7e13-4f6c-af46-5df66ad668e3\") " pod="openstack/nova-cell0-cell-mapping-xsqz9" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.366180 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-77n2s\" (UniqueName: \"kubernetes.io/projected/8a7752ba-7e13-4f6c-af46-5df66ad668e3-kube-api-access-77n2s\") pod \"nova-cell0-cell-mapping-xsqz9\" (UID: \"8a7752ba-7e13-4f6c-af46-5df66ad668e3\") " pod="openstack/nova-cell0-cell-mapping-xsqz9" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.366792 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a7752ba-7e13-4f6c-af46-5df66ad668e3-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-xsqz9\" (UID: \"8a7752ba-7e13-4f6c-af46-5df66ad668e3\") " pod="openstack/nova-cell0-cell-mapping-xsqz9" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.434102 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.437352 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.444487 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.471764 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-77n2s\" (UniqueName: \"kubernetes.io/projected/8a7752ba-7e13-4f6c-af46-5df66ad668e3-kube-api-access-77n2s\") pod \"nova-cell0-cell-mapping-xsqz9\" (UID: \"8a7752ba-7e13-4f6c-af46-5df66ad668e3\") " pod="openstack/nova-cell0-cell-mapping-xsqz9" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.472489 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a7752ba-7e13-4f6c-af46-5df66ad668e3-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-xsqz9\" (UID: \"8a7752ba-7e13-4f6c-af46-5df66ad668e3\") " pod="openstack/nova-cell0-cell-mapping-xsqz9" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.472587 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8a7752ba-7e13-4f6c-af46-5df66ad668e3-scripts\") pod \"nova-cell0-cell-mapping-xsqz9\" (UID: \"8a7752ba-7e13-4f6c-af46-5df66ad668e3\") " pod="openstack/nova-cell0-cell-mapping-xsqz9" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.472644 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a7752ba-7e13-4f6c-af46-5df66ad668e3-config-data\") pod \"nova-cell0-cell-mapping-xsqz9\" (UID: \"8a7752ba-7e13-4f6c-af46-5df66ad668e3\") " pod="openstack/nova-cell0-cell-mapping-xsqz9" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.496883 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a7752ba-7e13-4f6c-af46-5df66ad668e3-config-data\") pod \"nova-cell0-cell-mapping-xsqz9\" (UID: \"8a7752ba-7e13-4f6c-af46-5df66ad668e3\") " pod="openstack/nova-cell0-cell-mapping-xsqz9" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.501758 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a7752ba-7e13-4f6c-af46-5df66ad668e3-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-xsqz9\" (UID: \"8a7752ba-7e13-4f6c-af46-5df66ad668e3\") " pod="openstack/nova-cell0-cell-mapping-xsqz9" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.502026 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8a7752ba-7e13-4f6c-af46-5df66ad668e3-scripts\") pod \"nova-cell0-cell-mapping-xsqz9\" (UID: \"8a7752ba-7e13-4f6c-af46-5df66ad668e3\") " pod="openstack/nova-cell0-cell-mapping-xsqz9" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.513455 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.543368 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-77n2s\" (UniqueName: \"kubernetes.io/projected/8a7752ba-7e13-4f6c-af46-5df66ad668e3-kube-api-access-77n2s\") pod \"nova-cell0-cell-mapping-xsqz9\" (UID: \"8a7752ba-7e13-4f6c-af46-5df66ad668e3\") " pod="openstack/nova-cell0-cell-mapping-xsqz9" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.563546 4769 util.go:30] "No sandbox for pod can be 
found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-xsqz9" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.570721 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.572348 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.575105 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f8bf574-f4cc-49f2-97e4-dc87f8435ebb-config-data\") pod \"nova-api-0\" (UID: \"5f8bf574-f4cc-49f2-97e4-dc87f8435ebb\") " pod="openstack/nova-api-0" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.575226 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5f8bf574-f4cc-49f2-97e4-dc87f8435ebb-logs\") pod \"nova-api-0\" (UID: \"5f8bf574-f4cc-49f2-97e4-dc87f8435ebb\") " pod="openstack/nova-api-0" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.575260 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f8bf574-f4cc-49f2-97e4-dc87f8435ebb-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"5f8bf574-f4cc-49f2-97e4-dc87f8435ebb\") " pod="openstack/nova-api-0" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.575360 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nc6vn\" (UniqueName: \"kubernetes.io/projected/5f8bf574-f4cc-49f2-97e4-dc87f8435ebb-kube-api-access-nc6vn\") pod \"nova-api-0\" (UID: \"5f8bf574-f4cc-49f2-97e4-dc87f8435ebb\") " pod="openstack/nova-api-0" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.579801 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.653753 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.677984 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f8bf574-f4cc-49f2-97e4-dc87f8435ebb-config-data\") pod \"nova-api-0\" (UID: \"5f8bf574-f4cc-49f2-97e4-dc87f8435ebb\") " pod="openstack/nova-api-0" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.678078 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27485288-1c80-435a-a272-592d87c79882-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"27485288-1c80-435a-a272-592d87c79882\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.678127 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5f8bf574-f4cc-49f2-97e4-dc87f8435ebb-logs\") pod \"nova-api-0\" (UID: \"5f8bf574-f4cc-49f2-97e4-dc87f8435ebb\") " pod="openstack/nova-api-0" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.678150 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gcclx\" (UniqueName: 
\"kubernetes.io/projected/27485288-1c80-435a-a272-592d87c79882-kube-api-access-gcclx\") pod \"nova-cell1-novncproxy-0\" (UID: \"27485288-1c80-435a-a272-592d87c79882\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.678182 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f8bf574-f4cc-49f2-97e4-dc87f8435ebb-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"5f8bf574-f4cc-49f2-97e4-dc87f8435ebb\") " pod="openstack/nova-api-0" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.678249 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nc6vn\" (UniqueName: \"kubernetes.io/projected/5f8bf574-f4cc-49f2-97e4-dc87f8435ebb-kube-api-access-nc6vn\") pod \"nova-api-0\" (UID: \"5f8bf574-f4cc-49f2-97e4-dc87f8435ebb\") " pod="openstack/nova-api-0" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.678296 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27485288-1c80-435a-a272-592d87c79882-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"27485288-1c80-435a-a272-592d87c79882\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.680224 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5f8bf574-f4cc-49f2-97e4-dc87f8435ebb-logs\") pod \"nova-api-0\" (UID: \"5f8bf574-f4cc-49f2-97e4-dc87f8435ebb\") " pod="openstack/nova-api-0" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.712110 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f8bf574-f4cc-49f2-97e4-dc87f8435ebb-config-data\") pod \"nova-api-0\" (UID: \"5f8bf574-f4cc-49f2-97e4-dc87f8435ebb\") " pod="openstack/nova-api-0" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.712362 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f8bf574-f4cc-49f2-97e4-dc87f8435ebb-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"5f8bf574-f4cc-49f2-97e4-dc87f8435ebb\") " pod="openstack/nova-api-0" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.721026 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nc6vn\" (UniqueName: \"kubernetes.io/projected/5f8bf574-f4cc-49f2-97e4-dc87f8435ebb-kube-api-access-nc6vn\") pod \"nova-api-0\" (UID: \"5f8bf574-f4cc-49f2-97e4-dc87f8435ebb\") " pod="openstack/nova-api-0" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.730331 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.732324 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.745763 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.782284 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27485288-1c80-435a-a272-592d87c79882-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"27485288-1c80-435a-a272-592d87c79882\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.782664 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27485288-1c80-435a-a272-592d87c79882-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"27485288-1c80-435a-a272-592d87c79882\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.782715 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gcclx\" (UniqueName: \"kubernetes.io/projected/27485288-1c80-435a-a272-592d87c79882-kube-api-access-gcclx\") pod \"nova-cell1-novncproxy-0\" (UID: \"27485288-1c80-435a-a272-592d87c79882\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.790748 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27485288-1c80-435a-a272-592d87c79882-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"27485288-1c80-435a-a272-592d87c79882\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.793402 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27485288-1c80-435a-a272-592d87c79882-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"27485288-1c80-435a-a272-592d87c79882\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.816614 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.819209 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.822090 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gcclx\" (UniqueName: \"kubernetes.io/projected/27485288-1c80-435a-a272-592d87c79882-kube-api-access-gcclx\") pod \"nova-cell1-novncproxy-0\" (UID: \"27485288-1c80-435a-a272-592d87c79882\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.833218 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.880103 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.885562 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e2a25971-ea84-4e6e-a5b8-bfad403b2c61-logs\") pod \"nova-metadata-0\" (UID: \"e2a25971-ea84-4e6e-a5b8-bfad403b2c61\") " pod="openstack/nova-metadata-0" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.885687 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2a25971-ea84-4e6e-a5b8-bfad403b2c61-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"e2a25971-ea84-4e6e-a5b8-bfad403b2c61\") " pod="openstack/nova-metadata-0" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.885821 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q7pdr\" (UniqueName: \"kubernetes.io/projected/e2a25971-ea84-4e6e-a5b8-bfad403b2c61-kube-api-access-q7pdr\") pod \"nova-metadata-0\" (UID: \"e2a25971-ea84-4e6e-a5b8-bfad403b2c61\") " pod="openstack/nova-metadata-0" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.885979 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2a25971-ea84-4e6e-a5b8-bfad403b2c61-config-data\") pod \"nova-metadata-0\" (UID: \"e2a25971-ea84-4e6e-a5b8-bfad403b2c61\") " pod="openstack/nova-metadata-0" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.909613 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.927634 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 10:08:50 crc kubenswrapper[4769]: I1125 10:08:50.978086 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.000655 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f41ea94-9525-4a09-96e8-cd6cbc81278a-config-data\") pod \"nova-scheduler-0\" (UID: \"0f41ea94-9525-4a09-96e8-cd6cbc81278a\") " pod="openstack/nova-scheduler-0" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.000701 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f41ea94-9525-4a09-96e8-cd6cbc81278a-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"0f41ea94-9525-4a09-96e8-cd6cbc81278a\") " pod="openstack/nova-scheduler-0" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.000768 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e2a25971-ea84-4e6e-a5b8-bfad403b2c61-logs\") pod \"nova-metadata-0\" (UID: \"e2a25971-ea84-4e6e-a5b8-bfad403b2c61\") " pod="openstack/nova-metadata-0" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.001049 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2a25971-ea84-4e6e-a5b8-bfad403b2c61-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"e2a25971-ea84-4e6e-a5b8-bfad403b2c61\") " pod="openstack/nova-metadata-0" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.002099 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q7pdr\" (UniqueName: \"kubernetes.io/projected/e2a25971-ea84-4e6e-a5b8-bfad403b2c61-kube-api-access-q7pdr\") pod \"nova-metadata-0\" (UID: \"e2a25971-ea84-4e6e-a5b8-bfad403b2c61\") " pod="openstack/nova-metadata-0" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.002328 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jtvhh\" (UniqueName: \"kubernetes.io/projected/0f41ea94-9525-4a09-96e8-cd6cbc81278a-kube-api-access-jtvhh\") pod \"nova-scheduler-0\" (UID: \"0f41ea94-9525-4a09-96e8-cd6cbc81278a\") " pod="openstack/nova-scheduler-0" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.002536 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2a25971-ea84-4e6e-a5b8-bfad403b2c61-config-data\") pod \"nova-metadata-0\" (UID: \"e2a25971-ea84-4e6e-a5b8-bfad403b2c61\") " pod="openstack/nova-metadata-0" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.001281 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e2a25971-ea84-4e6e-a5b8-bfad403b2c61-logs\") pod \"nova-metadata-0\" (UID: \"e2a25971-ea84-4e6e-a5b8-bfad403b2c61\") " pod="openstack/nova-metadata-0" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.009230 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-tgxvk"] Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.009397 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2a25971-ea84-4e6e-a5b8-bfad403b2c61-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"e2a25971-ea84-4e6e-a5b8-bfad403b2c61\") " pod="openstack/nova-metadata-0" Nov 25 10:08:51 crc 
Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.022841 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5fbc4d444f-tgxvk"
Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.026409 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2a25971-ea84-4e6e-a5b8-bfad403b2c61-config-data\") pod \"nova-metadata-0\" (UID: \"e2a25971-ea84-4e6e-a5b8-bfad403b2c61\") " pod="openstack/nova-metadata-0"
Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.067810 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q7pdr\" (UniqueName: \"kubernetes.io/projected/e2a25971-ea84-4e6e-a5b8-bfad403b2c61-kube-api-access-q7pdr\") pod \"nova-metadata-0\" (UID: \"e2a25971-ea84-4e6e-a5b8-bfad403b2c61\") " pod="openstack/nova-metadata-0"
Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.093977 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-tgxvk"]
Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.107604 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82684575-8aff-4214-bd2a-20289cc4446a-config\") pod \"dnsmasq-dns-5fbc4d444f-tgxvk\" (UID: \"82684575-8aff-4214-bd2a-20289cc4446a\") " pod="openstack/dnsmasq-dns-5fbc4d444f-tgxvk"
Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.107668 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/82684575-8aff-4214-bd2a-20289cc4446a-ovsdbserver-nb\") pod \"dnsmasq-dns-5fbc4d444f-tgxvk\" (UID: \"82684575-8aff-4214-bd2a-20289cc4446a\") " pod="openstack/dnsmasq-dns-5fbc4d444f-tgxvk"
Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.107719 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/82684575-8aff-4214-bd2a-20289cc4446a-dns-svc\") pod \"dnsmasq-dns-5fbc4d444f-tgxvk\" (UID: \"82684575-8aff-4214-bd2a-20289cc4446a\") " pod="openstack/dnsmasq-dns-5fbc4d444f-tgxvk"
Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.107763 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/82684575-8aff-4214-bd2a-20289cc4446a-ovsdbserver-sb\") pod \"dnsmasq-dns-5fbc4d444f-tgxvk\" (UID: \"82684575-8aff-4214-bd2a-20289cc4446a\") " pod="openstack/dnsmasq-dns-5fbc4d444f-tgxvk"
Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.107860 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jtvhh\" (UniqueName: \"kubernetes.io/projected/0f41ea94-9525-4a09-96e8-cd6cbc81278a-kube-api-access-jtvhh\") pod \"nova-scheduler-0\" (UID: \"0f41ea94-9525-4a09-96e8-cd6cbc81278a\") " pod="openstack/nova-scheduler-0"
Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.107900 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lxlht\" (UniqueName: \"kubernetes.io/projected/82684575-8aff-4214-bd2a-20289cc4446a-kube-api-access-lxlht\") pod \"dnsmasq-dns-5fbc4d444f-tgxvk\" (UID: \"82684575-8aff-4214-bd2a-20289cc4446a\") " pod="openstack/dnsmasq-dns-5fbc4d444f-tgxvk"
"operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/82684575-8aff-4214-bd2a-20289cc4446a-dns-swift-storage-0\") pod \"dnsmasq-dns-5fbc4d444f-tgxvk\" (UID: \"82684575-8aff-4214-bd2a-20289cc4446a\") " pod="openstack/dnsmasq-dns-5fbc4d444f-tgxvk" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.108020 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f41ea94-9525-4a09-96e8-cd6cbc81278a-config-data\") pod \"nova-scheduler-0\" (UID: \"0f41ea94-9525-4a09-96e8-cd6cbc81278a\") " pod="openstack/nova-scheduler-0" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.108270 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f41ea94-9525-4a09-96e8-cd6cbc81278a-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"0f41ea94-9525-4a09-96e8-cd6cbc81278a\") " pod="openstack/nova-scheduler-0" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.113541 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f41ea94-9525-4a09-96e8-cd6cbc81278a-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"0f41ea94-9525-4a09-96e8-cd6cbc81278a\") " pod="openstack/nova-scheduler-0" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.116337 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f41ea94-9525-4a09-96e8-cd6cbc81278a-config-data\") pod \"nova-scheduler-0\" (UID: \"0f41ea94-9525-4a09-96e8-cd6cbc81278a\") " pod="openstack/nova-scheduler-0" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.136021 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jtvhh\" (UniqueName: \"kubernetes.io/projected/0f41ea94-9525-4a09-96e8-cd6cbc81278a-kube-api-access-jtvhh\") pod \"nova-scheduler-0\" (UID: \"0f41ea94-9525-4a09-96e8-cd6cbc81278a\") " pod="openstack/nova-scheduler-0" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.212508 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82684575-8aff-4214-bd2a-20289cc4446a-config\") pod \"dnsmasq-dns-5fbc4d444f-tgxvk\" (UID: \"82684575-8aff-4214-bd2a-20289cc4446a\") " pod="openstack/dnsmasq-dns-5fbc4d444f-tgxvk" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.214275 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/82684575-8aff-4214-bd2a-20289cc4446a-ovsdbserver-nb\") pod \"dnsmasq-dns-5fbc4d444f-tgxvk\" (UID: \"82684575-8aff-4214-bd2a-20289cc4446a\") " pod="openstack/dnsmasq-dns-5fbc4d444f-tgxvk" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.214782 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/82684575-8aff-4214-bd2a-20289cc4446a-dns-svc\") pod \"dnsmasq-dns-5fbc4d444f-tgxvk\" (UID: \"82684575-8aff-4214-bd2a-20289cc4446a\") " pod="openstack/dnsmasq-dns-5fbc4d444f-tgxvk" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.214903 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/82684575-8aff-4214-bd2a-20289cc4446a-ovsdbserver-sb\") pod 
\"dnsmasq-dns-5fbc4d444f-tgxvk\" (UID: \"82684575-8aff-4214-bd2a-20289cc4446a\") " pod="openstack/dnsmasq-dns-5fbc4d444f-tgxvk" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.215130 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lxlht\" (UniqueName: \"kubernetes.io/projected/82684575-8aff-4214-bd2a-20289cc4446a-kube-api-access-lxlht\") pod \"dnsmasq-dns-5fbc4d444f-tgxvk\" (UID: \"82684575-8aff-4214-bd2a-20289cc4446a\") " pod="openstack/dnsmasq-dns-5fbc4d444f-tgxvk" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.215306 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/82684575-8aff-4214-bd2a-20289cc4446a-dns-swift-storage-0\") pod \"dnsmasq-dns-5fbc4d444f-tgxvk\" (UID: \"82684575-8aff-4214-bd2a-20289cc4446a\") " pod="openstack/dnsmasq-dns-5fbc4d444f-tgxvk" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.216138 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/82684575-8aff-4214-bd2a-20289cc4446a-ovsdbserver-sb\") pod \"dnsmasq-dns-5fbc4d444f-tgxvk\" (UID: \"82684575-8aff-4214-bd2a-20289cc4446a\") " pod="openstack/dnsmasq-dns-5fbc4d444f-tgxvk" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.213660 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82684575-8aff-4214-bd2a-20289cc4446a-config\") pod \"dnsmasq-dns-5fbc4d444f-tgxvk\" (UID: \"82684575-8aff-4214-bd2a-20289cc4446a\") " pod="openstack/dnsmasq-dns-5fbc4d444f-tgxvk" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.216384 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/82684575-8aff-4214-bd2a-20289cc4446a-dns-svc\") pod \"dnsmasq-dns-5fbc4d444f-tgxvk\" (UID: \"82684575-8aff-4214-bd2a-20289cc4446a\") " pod="openstack/dnsmasq-dns-5fbc4d444f-tgxvk" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.216916 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/82684575-8aff-4214-bd2a-20289cc4446a-dns-swift-storage-0\") pod \"dnsmasq-dns-5fbc4d444f-tgxvk\" (UID: \"82684575-8aff-4214-bd2a-20289cc4446a\") " pod="openstack/dnsmasq-dns-5fbc4d444f-tgxvk" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.220595 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/82684575-8aff-4214-bd2a-20289cc4446a-ovsdbserver-nb\") pod \"dnsmasq-dns-5fbc4d444f-tgxvk\" (UID: \"82684575-8aff-4214-bd2a-20289cc4446a\") " pod="openstack/dnsmasq-dns-5fbc4d444f-tgxvk" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.227935 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.234082 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lxlht\" (UniqueName: \"kubernetes.io/projected/82684575-8aff-4214-bd2a-20289cc4446a-kube-api-access-lxlht\") pod \"dnsmasq-dns-5fbc4d444f-tgxvk\" (UID: \"82684575-8aff-4214-bd2a-20289cc4446a\") " pod="openstack/dnsmasq-dns-5fbc4d444f-tgxvk" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.258543 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.369610 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5fbc4d444f-tgxvk" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.692780 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-w6f7r"] Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.699642 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-w6f7r" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.703493 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.703809 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.728631 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-pcp4d" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.729697 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-pcp4d" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.734566 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-w6f7r"] Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.832761 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd25577c-f800-4119-8e6f-20258030dfbc-config-data\") pod \"nova-cell1-conductor-db-sync-w6f7r\" (UID: \"bd25577c-f800-4119-8e6f-20258030dfbc\") " pod="openstack/nova-cell1-conductor-db-sync-w6f7r" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.832896 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd25577c-f800-4119-8e6f-20258030dfbc-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-w6f7r\" (UID: \"bd25577c-f800-4119-8e6f-20258030dfbc\") " pod="openstack/nova-cell1-conductor-db-sync-w6f7r" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.832927 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd25577c-f800-4119-8e6f-20258030dfbc-scripts\") pod \"nova-cell1-conductor-db-sync-w6f7r\" (UID: \"bd25577c-f800-4119-8e6f-20258030dfbc\") " pod="openstack/nova-cell1-conductor-db-sync-w6f7r" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.833021 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-68gnt\" (UniqueName: \"kubernetes.io/projected/bd25577c-f800-4119-8e6f-20258030dfbc-kube-api-access-68gnt\") pod \"nova-cell1-conductor-db-sync-w6f7r\" (UID: \"bd25577c-f800-4119-8e6f-20258030dfbc\") " pod="openstack/nova-cell1-conductor-db-sync-w6f7r" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.935732 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-68gnt\" (UniqueName: \"kubernetes.io/projected/bd25577c-f800-4119-8e6f-20258030dfbc-kube-api-access-68gnt\") pod \"nova-cell1-conductor-db-sync-w6f7r\" (UID: \"bd25577c-f800-4119-8e6f-20258030dfbc\") " 
pod="openstack/nova-cell1-conductor-db-sync-w6f7r" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.935904 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd25577c-f800-4119-8e6f-20258030dfbc-config-data\") pod \"nova-cell1-conductor-db-sync-w6f7r\" (UID: \"bd25577c-f800-4119-8e6f-20258030dfbc\") " pod="openstack/nova-cell1-conductor-db-sync-w6f7r" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.936051 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd25577c-f800-4119-8e6f-20258030dfbc-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-w6f7r\" (UID: \"bd25577c-f800-4119-8e6f-20258030dfbc\") " pod="openstack/nova-cell1-conductor-db-sync-w6f7r" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.936080 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd25577c-f800-4119-8e6f-20258030dfbc-scripts\") pod \"nova-cell1-conductor-db-sync-w6f7r\" (UID: \"bd25577c-f800-4119-8e6f-20258030dfbc\") " pod="openstack/nova-cell1-conductor-db-sync-w6f7r" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.941796 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd25577c-f800-4119-8e6f-20258030dfbc-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-w6f7r\" (UID: \"bd25577c-f800-4119-8e6f-20258030dfbc\") " pod="openstack/nova-cell1-conductor-db-sync-w6f7r" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.941824 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd25577c-f800-4119-8e6f-20258030dfbc-config-data\") pod \"nova-cell1-conductor-db-sync-w6f7r\" (UID: \"bd25577c-f800-4119-8e6f-20258030dfbc\") " pod="openstack/nova-cell1-conductor-db-sync-w6f7r" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.945522 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd25577c-f800-4119-8e6f-20258030dfbc-scripts\") pod \"nova-cell1-conductor-db-sync-w6f7r\" (UID: \"bd25577c-f800-4119-8e6f-20258030dfbc\") " pod="openstack/nova-cell1-conductor-db-sync-w6f7r" Nov 25 10:08:51 crc kubenswrapper[4769]: I1125 10:08:51.956832 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-68gnt\" (UniqueName: \"kubernetes.io/projected/bd25577c-f800-4119-8e6f-20258030dfbc-kube-api-access-68gnt\") pod \"nova-cell1-conductor-db-sync-w6f7r\" (UID: \"bd25577c-f800-4119-8e6f-20258030dfbc\") " pod="openstack/nova-cell1-conductor-db-sync-w6f7r" Nov 25 10:08:52 crc kubenswrapper[4769]: I1125 10:08:52.032881 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-w6f7r" Nov 25 10:08:52 crc kubenswrapper[4769]: I1125 10:08:52.813554 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-pcp4d" podUID="7937ea4d-23ba-4f0a-af90-1257f8592d1e" containerName="registry-server" probeResult="failure" output=< Nov 25 10:08:52 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 10:08:52 crc kubenswrapper[4769]: > Nov 25 10:08:54 crc kubenswrapper[4769]: I1125 10:08:54.753542 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 10:08:54 crc kubenswrapper[4769]: I1125 10:08:54.767086 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 10:08:55 crc kubenswrapper[4769]: I1125 10:08:55.728877 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-xsqz9"] Nov 25 10:08:55 crc kubenswrapper[4769]: W1125 10:08:55.730858 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8a7752ba_7e13_4f6c_af46_5df66ad668e3.slice/crio-68ab05a04255c77339f7d230b7db0c94a464240cb55c58632ac0b4bf27bfc49a WatchSource:0}: Error finding container 68ab05a04255c77339f7d230b7db0c94a464240cb55c58632ac0b4bf27bfc49a: Status 404 returned error can't find the container with id 68ab05a04255c77339f7d230b7db0c94a464240cb55c58632ac0b4bf27bfc49a Nov 25 10:08:55 crc kubenswrapper[4769]: I1125 10:08:55.793773 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-w6f7r"] Nov 25 10:08:56 crc kubenswrapper[4769]: I1125 10:08:56.174552 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 10:08:56 crc kubenswrapper[4769]: I1125 10:08:56.194981 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 10:08:56 crc kubenswrapper[4769]: I1125 10:08:56.221348 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 10:08:56 crc kubenswrapper[4769]: I1125 10:08:56.277840 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-tgxvk"] Nov 25 10:08:56 crc kubenswrapper[4769]: I1125 10:08:56.289645 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 10:08:56 crc kubenswrapper[4769]: I1125 10:08:56.366133 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fbc4d444f-tgxvk" event={"ID":"82684575-8aff-4214-bd2a-20289cc4446a","Type":"ContainerStarted","Data":"d66c7b2fba4ba8d4c480f8a9490318a45822b5ad7d9b40ebb131e366c162b20c"} Nov 25 10:08:56 crc kubenswrapper[4769]: I1125 10:08:56.369462 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e2a25971-ea84-4e6e-a5b8-bfad403b2c61","Type":"ContainerStarted","Data":"a1009c8610d8e673af71f8ea0ae4d9d9e1b6ce41282281d441e188d2f6e38df7"} Nov 25 10:08:56 crc kubenswrapper[4769]: I1125 10:08:56.371585 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-gv6vg" event={"ID":"cb6debff-79c0-45cc-915d-8d40aa9a4b78","Type":"ContainerStarted","Data":"9c99c28d76e8806f8744965d1d05550b064ba4de5eeae673f98153124818e9a7"} Nov 25 10:08:56 crc kubenswrapper[4769]: I1125 10:08:56.372793 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" 
event={"ID":"27485288-1c80-435a-a272-592d87c79882","Type":"ContainerStarted","Data":"a4ec4a6939c8c09ee2895e82167711acca8d5ef529242c4adf4874d8e243e0f3"} Nov 25 10:08:56 crc kubenswrapper[4769]: I1125 10:08:56.377187 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-w6f7r" event={"ID":"bd25577c-f800-4119-8e6f-20258030dfbc","Type":"ContainerStarted","Data":"4a020519aefc75cb906bbe26de0f7c58f76aab7f5eb5671179e31ca468fc1a6e"} Nov 25 10:08:56 crc kubenswrapper[4769]: I1125 10:08:56.377230 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-w6f7r" event={"ID":"bd25577c-f800-4119-8e6f-20258030dfbc","Type":"ContainerStarted","Data":"18c971d585408360d81763fb49e03a4a86bdcc9b26cc609eb61719a528949530"} Nov 25 10:08:56 crc kubenswrapper[4769]: I1125 10:08:56.378564 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"0f41ea94-9525-4a09-96e8-cd6cbc81278a","Type":"ContainerStarted","Data":"12579da008ee1f345f74333625f575786aa3c5bcdfe1eeecca4bfede0c239c43"} Nov 25 10:08:56 crc kubenswrapper[4769]: I1125 10:08:56.380315 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-xsqz9" event={"ID":"8a7752ba-7e13-4f6c-af46-5df66ad668e3","Type":"ContainerStarted","Data":"509a1cc9c9e190c94b3b3647dac50326f2abe9ea6fab6c5352c013fd3308fe47"} Nov 25 10:08:56 crc kubenswrapper[4769]: I1125 10:08:56.380355 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-xsqz9" event={"ID":"8a7752ba-7e13-4f6c-af46-5df66ad668e3","Type":"ContainerStarted","Data":"68ab05a04255c77339f7d230b7db0c94a464240cb55c58632ac0b4bf27bfc49a"} Nov 25 10:08:56 crc kubenswrapper[4769]: I1125 10:08:56.386298 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5f8bf574-f4cc-49f2-97e4-dc87f8435ebb","Type":"ContainerStarted","Data":"037e64df66e613b907428caa18414557cedbf38f827be07eb14158b1b36d774c"} Nov 25 10:08:56 crc kubenswrapper[4769]: I1125 10:08:56.391436 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-sync-gv6vg" podStartSLOduration=3.389110024 podStartE2EDuration="11.391421793s" podCreationTimestamp="2025-11-25 10:08:45 +0000 UTC" firstStartedPulling="2025-11-25 10:08:46.795276653 +0000 UTC m=+1475.380248966" lastFinishedPulling="2025-11-25 10:08:54.797588422 +0000 UTC m=+1483.382560735" observedRunningTime="2025-11-25 10:08:56.386493816 +0000 UTC m=+1484.971466129" watchObservedRunningTime="2025-11-25 10:08:56.391421793 +0000 UTC m=+1484.976394106" Nov 25 10:08:56 crc kubenswrapper[4769]: I1125 10:08:56.412316 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-xsqz9" podStartSLOduration=6.412296138 podStartE2EDuration="6.412296138s" podCreationTimestamp="2025-11-25 10:08:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:08:56.404540859 +0000 UTC m=+1484.989513172" watchObservedRunningTime="2025-11-25 10:08:56.412296138 +0000 UTC m=+1484.997268451" Nov 25 10:08:56 crc kubenswrapper[4769]: I1125 10:08:56.434363 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-w6f7r" podStartSLOduration=5.434328513 podStartE2EDuration="5.434328513s" podCreationTimestamp="2025-11-25 10:08:51 +0000 UTC" firstStartedPulling="0001-01-01 
00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:08:56.419312728 +0000 UTC m=+1485.004285051" watchObservedRunningTime="2025-11-25 10:08:56.434328513 +0000 UTC m=+1485.019300826" Nov 25 10:08:57 crc kubenswrapper[4769]: I1125 10:08:57.541617 4769 generic.go:334] "Generic (PLEG): container finished" podID="5bd31c36-b250-469b-b700-56617d826d79" containerID="f1198c60590d8d1f401e3318ddb6cc136ac2779af8cfac1481eead81cbdbc938" exitCode=137 Nov 25 10:08:57 crc kubenswrapper[4769]: I1125 10:08:57.542054 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5bd31c36-b250-469b-b700-56617d826d79","Type":"ContainerDied","Data":"f1198c60590d8d1f401e3318ddb6cc136ac2779af8cfac1481eead81cbdbc938"} Nov 25 10:08:57 crc kubenswrapper[4769]: I1125 10:08:57.542086 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5bd31c36-b250-469b-b700-56617d826d79","Type":"ContainerDied","Data":"da32657a82a271aa12206af83b269d293075fef2ce0ccb323a79d0fb6898bf91"} Nov 25 10:08:57 crc kubenswrapper[4769]: I1125 10:08:57.542098 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="da32657a82a271aa12206af83b269d293075fef2ce0ccb323a79d0fb6898bf91" Nov 25 10:08:57 crc kubenswrapper[4769]: I1125 10:08:57.553132 4769 generic.go:334] "Generic (PLEG): container finished" podID="82684575-8aff-4214-bd2a-20289cc4446a" containerID="b2732f1b5e7d004b02f39149542974a71c63cc17b214af80fb4e4b75ab1b9654" exitCode=0 Nov 25 10:08:57 crc kubenswrapper[4769]: I1125 10:08:57.554585 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fbc4d444f-tgxvk" event={"ID":"82684575-8aff-4214-bd2a-20289cc4446a","Type":"ContainerDied","Data":"b2732f1b5e7d004b02f39149542974a71c63cc17b214af80fb4e4b75ab1b9654"} Nov 25 10:08:57 crc kubenswrapper[4769]: I1125 10:08:57.568997 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:08:57 crc kubenswrapper[4769]: I1125 10:08:57.745609 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5bd31c36-b250-469b-b700-56617d826d79-log-httpd\") pod \"5bd31c36-b250-469b-b700-56617d826d79\" (UID: \"5bd31c36-b250-469b-b700-56617d826d79\") " Nov 25 10:08:57 crc kubenswrapper[4769]: I1125 10:08:57.745683 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5bd31c36-b250-469b-b700-56617d826d79-scripts\") pod \"5bd31c36-b250-469b-b700-56617d826d79\" (UID: \"5bd31c36-b250-469b-b700-56617d826d79\") " Nov 25 10:08:57 crc kubenswrapper[4769]: I1125 10:08:57.745766 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r78zz\" (UniqueName: \"kubernetes.io/projected/5bd31c36-b250-469b-b700-56617d826d79-kube-api-access-r78zz\") pod \"5bd31c36-b250-469b-b700-56617d826d79\" (UID: \"5bd31c36-b250-469b-b700-56617d826d79\") " Nov 25 10:08:57 crc kubenswrapper[4769]: I1125 10:08:57.745809 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5bd31c36-b250-469b-b700-56617d826d79-run-httpd\") pod \"5bd31c36-b250-469b-b700-56617d826d79\" (UID: \"5bd31c36-b250-469b-b700-56617d826d79\") " Nov 25 10:08:57 crc kubenswrapper[4769]: I1125 10:08:57.745906 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5bd31c36-b250-469b-b700-56617d826d79-sg-core-conf-yaml\") pod \"5bd31c36-b250-469b-b700-56617d826d79\" (UID: \"5bd31c36-b250-469b-b700-56617d826d79\") " Nov 25 10:08:57 crc kubenswrapper[4769]: I1125 10:08:57.746103 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5bd31c36-b250-469b-b700-56617d826d79-config-data\") pod \"5bd31c36-b250-469b-b700-56617d826d79\" (UID: \"5bd31c36-b250-469b-b700-56617d826d79\") " Nov 25 10:08:57 crc kubenswrapper[4769]: I1125 10:08:57.746130 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5bd31c36-b250-469b-b700-56617d826d79-combined-ca-bundle\") pod \"5bd31c36-b250-469b-b700-56617d826d79\" (UID: \"5bd31c36-b250-469b-b700-56617d826d79\") " Nov 25 10:08:57 crc kubenswrapper[4769]: I1125 10:08:57.746416 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5bd31c36-b250-469b-b700-56617d826d79-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "5bd31c36-b250-469b-b700-56617d826d79" (UID: "5bd31c36-b250-469b-b700-56617d826d79"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:08:57 crc kubenswrapper[4769]: I1125 10:08:57.747603 4769 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5bd31c36-b250-469b-b700-56617d826d79-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:57 crc kubenswrapper[4769]: I1125 10:08:57.752844 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5bd31c36-b250-469b-b700-56617d826d79-scripts" (OuterVolumeSpecName: "scripts") pod "5bd31c36-b250-469b-b700-56617d826d79" (UID: "5bd31c36-b250-469b-b700-56617d826d79"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:08:57 crc kubenswrapper[4769]: I1125 10:08:57.760246 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5bd31c36-b250-469b-b700-56617d826d79-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "5bd31c36-b250-469b-b700-56617d826d79" (UID: "5bd31c36-b250-469b-b700-56617d826d79"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:08:57 crc kubenswrapper[4769]: I1125 10:08:57.771143 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5bd31c36-b250-469b-b700-56617d826d79-kube-api-access-r78zz" (OuterVolumeSpecName: "kube-api-access-r78zz") pod "5bd31c36-b250-469b-b700-56617d826d79" (UID: "5bd31c36-b250-469b-b700-56617d826d79"). InnerVolumeSpecName "kube-api-access-r78zz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:08:57 crc kubenswrapper[4769]: I1125 10:08:57.832124 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5bd31c36-b250-469b-b700-56617d826d79-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "5bd31c36-b250-469b-b700-56617d826d79" (UID: "5bd31c36-b250-469b-b700-56617d826d79"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:08:57 crc kubenswrapper[4769]: I1125 10:08:57.853147 4769 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5bd31c36-b250-469b-b700-56617d826d79-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:57 crc kubenswrapper[4769]: I1125 10:08:57.853456 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r78zz\" (UniqueName: \"kubernetes.io/projected/5bd31c36-b250-469b-b700-56617d826d79-kube-api-access-r78zz\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:57 crc kubenswrapper[4769]: I1125 10:08:57.853601 4769 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5bd31c36-b250-469b-b700-56617d826d79-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:57 crc kubenswrapper[4769]: I1125 10:08:57.853670 4769 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5bd31c36-b250-469b-b700-56617d826d79-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:57 crc kubenswrapper[4769]: I1125 10:08:57.939782 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5bd31c36-b250-469b-b700-56617d826d79-config-data" (OuterVolumeSpecName: "config-data") pod "5bd31c36-b250-469b-b700-56617d826d79" (UID: "5bd31c36-b250-469b-b700-56617d826d79"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:08:57 crc kubenswrapper[4769]: I1125 10:08:57.950192 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5bd31c36-b250-469b-b700-56617d826d79-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5bd31c36-b250-469b-b700-56617d826d79" (UID: "5bd31c36-b250-469b-b700-56617d826d79"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:08:57 crc kubenswrapper[4769]: I1125 10:08:57.955971 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5bd31c36-b250-469b-b700-56617d826d79-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:57 crc kubenswrapper[4769]: I1125 10:08:57.956006 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5bd31c36-b250-469b-b700-56617d826d79-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:08:58 crc kubenswrapper[4769]: I1125 10:08:58.569515 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:08:58 crc kubenswrapper[4769]: I1125 10:08:58.615999 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:08:58 crc kubenswrapper[4769]: I1125 10:08:58.645704 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:08:58 crc kubenswrapper[4769]: I1125 10:08:58.675443 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:08:58 crc kubenswrapper[4769]: E1125 10:08:58.676030 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5bd31c36-b250-469b-b700-56617d826d79" containerName="sg-core" Nov 25 10:08:58 crc kubenswrapper[4769]: I1125 10:08:58.676049 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="5bd31c36-b250-469b-b700-56617d826d79" containerName="sg-core" Nov 25 10:08:58 crc kubenswrapper[4769]: E1125 10:08:58.676089 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5bd31c36-b250-469b-b700-56617d826d79" containerName="ceilometer-central-agent" Nov 25 10:08:58 crc kubenswrapper[4769]: I1125 10:08:58.676095 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="5bd31c36-b250-469b-b700-56617d826d79" containerName="ceilometer-central-agent" Nov 25 10:08:58 crc kubenswrapper[4769]: E1125 10:08:58.676110 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5bd31c36-b250-469b-b700-56617d826d79" containerName="ceilometer-notification-agent" Nov 25 10:08:58 crc kubenswrapper[4769]: I1125 10:08:58.676116 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="5bd31c36-b250-469b-b700-56617d826d79" containerName="ceilometer-notification-agent" Nov 25 10:08:58 crc kubenswrapper[4769]: E1125 10:08:58.676133 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5bd31c36-b250-469b-b700-56617d826d79" containerName="proxy-httpd" Nov 25 10:08:58 crc kubenswrapper[4769]: I1125 10:08:58.676140 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="5bd31c36-b250-469b-b700-56617d826d79" containerName="proxy-httpd" Nov 25 10:08:58 crc kubenswrapper[4769]: I1125 10:08:58.676369 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="5bd31c36-b250-469b-b700-56617d826d79" containerName="sg-core" Nov 25 10:08:58 crc kubenswrapper[4769]: I1125 10:08:58.676383 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="5bd31c36-b250-469b-b700-56617d826d79" containerName="proxy-httpd" Nov 25 10:08:58 crc kubenswrapper[4769]: I1125 10:08:58.676395 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="5bd31c36-b250-469b-b700-56617d826d79" containerName="ceilometer-central-agent" Nov 25 10:08:58 crc kubenswrapper[4769]: I1125 10:08:58.676414 4769 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="5bd31c36-b250-469b-b700-56617d826d79" containerName="ceilometer-notification-agent" Nov 25 10:08:58 crc kubenswrapper[4769]: I1125 10:08:58.678645 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:08:58 crc kubenswrapper[4769]: I1125 10:08:58.681582 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 10:08:58 crc kubenswrapper[4769]: I1125 10:08:58.682434 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 10:08:58 crc kubenswrapper[4769]: I1125 10:08:58.691136 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:08:58 crc kubenswrapper[4769]: I1125 10:08:58.876792 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7-scripts\") pod \"ceilometer-0\" (UID: \"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7\") " pod="openstack/ceilometer-0" Nov 25 10:08:58 crc kubenswrapper[4769]: I1125 10:08:58.876858 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7-config-data\") pod \"ceilometer-0\" (UID: \"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7\") " pod="openstack/ceilometer-0" Nov 25 10:08:58 crc kubenswrapper[4769]: I1125 10:08:58.876903 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7\") " pod="openstack/ceilometer-0" Nov 25 10:08:58 crc kubenswrapper[4769]: I1125 10:08:58.876929 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7-log-httpd\") pod \"ceilometer-0\" (UID: \"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7\") " pod="openstack/ceilometer-0" Nov 25 10:08:58 crc kubenswrapper[4769]: I1125 10:08:58.877190 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l4kgf\" (UniqueName: \"kubernetes.io/projected/49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7-kube-api-access-l4kgf\") pod \"ceilometer-0\" (UID: \"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7\") " pod="openstack/ceilometer-0" Nov 25 10:08:58 crc kubenswrapper[4769]: I1125 10:08:58.877312 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7\") " pod="openstack/ceilometer-0" Nov 25 10:08:58 crc kubenswrapper[4769]: I1125 10:08:58.877347 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7-run-httpd\") pod \"ceilometer-0\" (UID: \"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7\") " pod="openstack/ceilometer-0" Nov 25 10:08:58 crc kubenswrapper[4769]: I1125 10:08:58.979842 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7\") " pod="openstack/ceilometer-0" Nov 25 10:08:58 crc kubenswrapper[4769]: I1125 10:08:58.980302 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7-log-httpd\") pod \"ceilometer-0\" (UID: \"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7\") " pod="openstack/ceilometer-0" Nov 25 10:08:58 crc kubenswrapper[4769]: I1125 10:08:58.980473 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l4kgf\" (UniqueName: \"kubernetes.io/projected/49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7-kube-api-access-l4kgf\") pod \"ceilometer-0\" (UID: \"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7\") " pod="openstack/ceilometer-0" Nov 25 10:08:58 crc kubenswrapper[4769]: I1125 10:08:58.980551 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7\") " pod="openstack/ceilometer-0" Nov 25 10:08:58 crc kubenswrapper[4769]: I1125 10:08:58.980614 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7-run-httpd\") pod \"ceilometer-0\" (UID: \"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7\") " pod="openstack/ceilometer-0" Nov 25 10:08:58 crc kubenswrapper[4769]: I1125 10:08:58.980727 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7-scripts\") pod \"ceilometer-0\" (UID: \"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7\") " pod="openstack/ceilometer-0" Nov 25 10:08:58 crc kubenswrapper[4769]: I1125 10:08:58.980812 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7-config-data\") pod \"ceilometer-0\" (UID: \"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7\") " pod="openstack/ceilometer-0" Nov 25 10:08:58 crc kubenswrapper[4769]: I1125 10:08:58.982934 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7-run-httpd\") pod \"ceilometer-0\" (UID: \"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7\") " pod="openstack/ceilometer-0" Nov 25 10:08:58 crc kubenswrapper[4769]: I1125 10:08:58.983382 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7-log-httpd\") pod \"ceilometer-0\" (UID: \"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7\") " pod="openstack/ceilometer-0" Nov 25 10:08:58 crc kubenswrapper[4769]: I1125 10:08:58.989903 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7\") " pod="openstack/ceilometer-0" Nov 25 10:08:58 crc kubenswrapper[4769]: I1125 10:08:58.989926 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7\") " pod="openstack/ceilometer-0" Nov 25 10:08:58 crc kubenswrapper[4769]: I1125 10:08:58.993353 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7-config-data\") pod \"ceilometer-0\" (UID: \"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7\") " pod="openstack/ceilometer-0" Nov 25 10:08:59 crc kubenswrapper[4769]: I1125 10:08:59.005427 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l4kgf\" (UniqueName: \"kubernetes.io/projected/49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7-kube-api-access-l4kgf\") pod \"ceilometer-0\" (UID: \"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7\") " pod="openstack/ceilometer-0" Nov 25 10:08:59 crc kubenswrapper[4769]: I1125 10:08:59.007517 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7-scripts\") pod \"ceilometer-0\" (UID: \"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7\") " pod="openstack/ceilometer-0" Nov 25 10:08:59 crc kubenswrapper[4769]: I1125 10:08:59.011559 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:08:59 crc kubenswrapper[4769]: I1125 10:08:59.589534 4769 generic.go:334] "Generic (PLEG): container finished" podID="cb6debff-79c0-45cc-915d-8d40aa9a4b78" containerID="9c99c28d76e8806f8744965d1d05550b064ba4de5eeae673f98153124818e9a7" exitCode=0 Nov 25 10:08:59 crc kubenswrapper[4769]: I1125 10:08:59.589586 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-gv6vg" event={"ID":"cb6debff-79c0-45cc-915d-8d40aa9a4b78","Type":"ContainerDied","Data":"9c99c28d76e8806f8744965d1d05550b064ba4de5eeae673f98153124818e9a7"} Nov 25 10:09:00 crc kubenswrapper[4769]: I1125 10:09:00.259997 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5bd31c36-b250-469b-b700-56617d826d79" path="/var/lib/kubelet/pods/5bd31c36-b250-469b-b700-56617d826d79/volumes" Nov 25 10:09:00 crc kubenswrapper[4769]: I1125 10:09:00.553454 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:09:00 crc kubenswrapper[4769]: W1125 10:09:00.558814 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod49f73e1f_e6d9_4855_8b3d_132a8c8dc7a7.slice/crio-fc6f153fc79138569c535886823b873a27921f272edc9d587d3dbd04d34eb963 WatchSource:0}: Error finding container fc6f153fc79138569c535886823b873a27921f272edc9d587d3dbd04d34eb963: Status 404 returned error can't find the container with id fc6f153fc79138569c535886823b873a27921f272edc9d587d3dbd04d34eb963 Nov 25 10:09:00 crc kubenswrapper[4769]: I1125 10:09:00.618905 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"27485288-1c80-435a-a272-592d87c79882","Type":"ContainerStarted","Data":"f3fbf9236d34cdc902d87415a559cfec72b39ea8552cf1fba968a651fc672986"} Nov 25 10:09:00 crc kubenswrapper[4769]: I1125 10:09:00.619129 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="27485288-1c80-435a-a272-592d87c79882" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://f3fbf9236d34cdc902d87415a559cfec72b39ea8552cf1fba968a651fc672986" 
Nov 25 10:09:00 crc kubenswrapper[4769]: I1125 10:09:00.619129 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="27485288-1c80-435a-a272-592d87c79882" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://f3fbf9236d34cdc902d87415a559cfec72b39ea8552cf1fba968a651fc672986" gracePeriod=30
Nov 25 10:09:00 crc kubenswrapper[4769]: I1125 10:09:00.627454 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"0f41ea94-9525-4a09-96e8-cd6cbc81278a","Type":"ContainerStarted","Data":"c236b72e809a4e5e305f9a7b1ec56438a38e8b53274cf98e0b9321ef3bf1b890"}
Nov 25 10:09:00 crc kubenswrapper[4769]: I1125 10:09:00.630326 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7","Type":"ContainerStarted","Data":"fc6f153fc79138569c535886823b873a27921f272edc9d587d3dbd04d34eb963"}
Nov 25 10:09:00 crc kubenswrapper[4769]: I1125 10:09:00.634796 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5f8bf574-f4cc-49f2-97e4-dc87f8435ebb","Type":"ContainerStarted","Data":"0618e911ba8be8f596aaa22da2ef18ce65ec658e44e1993a2a089680884d3d07"}
Nov 25 10:09:00 crc kubenswrapper[4769]: I1125 10:09:00.647601 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fbc4d444f-tgxvk" event={"ID":"82684575-8aff-4214-bd2a-20289cc4446a","Type":"ContainerStarted","Data":"513bcc0d36d6dace1da1845551ef157a03d1162376bd7f6d0ac36277a659431b"}
Nov 25 10:09:00 crc kubenswrapper[4769]: I1125 10:09:00.647779 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5fbc4d444f-tgxvk"
Nov 25 10:09:00 crc kubenswrapper[4769]: I1125 10:09:00.661939 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e2a25971-ea84-4e6e-a5b8-bfad403b2c61","Type":"ContainerStarted","Data":"e870ca18963651ffab71b6627f6f4689c14195a4b82c260d1969ecc722103ee0"}
Nov 25 10:09:00 crc kubenswrapper[4769]: I1125 10:09:00.668342 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=6.869528482 podStartE2EDuration="10.668308862s" podCreationTimestamp="2025-11-25 10:08:50 +0000 UTC" firstStartedPulling="2025-11-25 10:08:56.188024227 +0000 UTC m=+1484.772996540" lastFinishedPulling="2025-11-25 10:08:59.986804617 +0000 UTC m=+1488.571776920" observedRunningTime="2025-11-25 10:09:00.657177326 +0000 UTC m=+1489.242149629" watchObservedRunningTime="2025-11-25 10:09:00.668308862 +0000 UTC m=+1489.253281175"
Nov 25 10:09:00 crc kubenswrapper[4769]: I1125 10:09:00.711258 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5fbc4d444f-tgxvk" podStartSLOduration=10.711238953 podStartE2EDuration="10.711238953s" podCreationTimestamp="2025-11-25 10:08:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:09:00.679845428 +0000 UTC m=+1489.264817751" watchObservedRunningTime="2025-11-25 10:09:00.711238953 +0000 UTC m=+1489.296211266"
Nov 25 10:09:00 crc kubenswrapper[4769]: I1125 10:09:00.747396 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=7.047827814 podStartE2EDuration="10.747366249s" podCreationTimestamp="2025-11-25 10:08:50 +0000 UTC" firstStartedPulling="2025-11-25 10:08:56.287279632 +0000 UTC m=+1484.872251945" lastFinishedPulling="2025-11-25 10:08:59.986818067 +0000 UTC m=+1488.571790380" observedRunningTime="2025-11-25 10:09:00.702841217 +0000 UTC m=+1489.287813530" watchObservedRunningTime="2025-11-25 10:09:00.747366249 +0000 UTC m=+1489.332338562"
Nov 25 10:09:00 crc kubenswrapper[4769]: I1125 10:09:00.978389 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0"
Nov 25 10:09:01 crc kubenswrapper[4769]: I1125 10:09:01.259573 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Nov 25 10:09:01 crc kubenswrapper[4769]: I1125 10:09:01.261240 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Nov 25 10:09:01 crc kubenswrapper[4769]: I1125 10:09:01.306394 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-gv6vg"
Nov 25 10:09:01 crc kubenswrapper[4769]: I1125 10:09:01.308447 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Nov 25 10:09:01 crc kubenswrapper[4769]: I1125 10:09:01.418209 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6t84j\" (UniqueName: \"kubernetes.io/projected/cb6debff-79c0-45cc-915d-8d40aa9a4b78-kube-api-access-6t84j\") pod \"cb6debff-79c0-45cc-915d-8d40aa9a4b78\" (UID: \"cb6debff-79c0-45cc-915d-8d40aa9a4b78\") "
Nov 25 10:09:01 crc kubenswrapper[4769]: I1125 10:09:01.418291 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cb6debff-79c0-45cc-915d-8d40aa9a4b78-scripts\") pod \"cb6debff-79c0-45cc-915d-8d40aa9a4b78\" (UID: \"cb6debff-79c0-45cc-915d-8d40aa9a4b78\") "
Nov 25 10:09:01 crc kubenswrapper[4769]: I1125 10:09:01.418336 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb6debff-79c0-45cc-915d-8d40aa9a4b78-config-data\") pod \"cb6debff-79c0-45cc-915d-8d40aa9a4b78\" (UID: \"cb6debff-79c0-45cc-915d-8d40aa9a4b78\") "
Nov 25 10:09:01 crc kubenswrapper[4769]: I1125 10:09:01.418379 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb6debff-79c0-45cc-915d-8d40aa9a4b78-combined-ca-bundle\") pod \"cb6debff-79c0-45cc-915d-8d40aa9a4b78\" (UID: \"cb6debff-79c0-45cc-915d-8d40aa9a4b78\") "
Nov 25 10:09:01 crc kubenswrapper[4769]: I1125 10:09:01.424118 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb6debff-79c0-45cc-915d-8d40aa9a4b78-scripts" (OuterVolumeSpecName: "scripts") pod "cb6debff-79c0-45cc-915d-8d40aa9a4b78" (UID: "cb6debff-79c0-45cc-915d-8d40aa9a4b78"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:09:01 crc kubenswrapper[4769]: I1125 10:09:01.424953 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb6debff-79c0-45cc-915d-8d40aa9a4b78-kube-api-access-6t84j" (OuterVolumeSpecName: "kube-api-access-6t84j") pod "cb6debff-79c0-45cc-915d-8d40aa9a4b78" (UID: "cb6debff-79c0-45cc-915d-8d40aa9a4b78"). InnerVolumeSpecName "kube-api-access-6t84j". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:09:01 crc kubenswrapper[4769]: I1125 10:09:01.465623 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb6debff-79c0-45cc-915d-8d40aa9a4b78-config-data" (OuterVolumeSpecName: "config-data") pod "cb6debff-79c0-45cc-915d-8d40aa9a4b78" (UID: "cb6debff-79c0-45cc-915d-8d40aa9a4b78"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:09:01 crc kubenswrapper[4769]: I1125 10:09:01.521720 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6t84j\" (UniqueName: \"kubernetes.io/projected/cb6debff-79c0-45cc-915d-8d40aa9a4b78-kube-api-access-6t84j\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:01 crc kubenswrapper[4769]: I1125 10:09:01.522047 4769 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cb6debff-79c0-45cc-915d-8d40aa9a4b78-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:01 crc kubenswrapper[4769]: I1125 10:09:01.522057 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb6debff-79c0-45cc-915d-8d40aa9a4b78-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:01 crc kubenswrapper[4769]: I1125 10:09:01.522067 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb6debff-79c0-45cc-915d-8d40aa9a4b78-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:01 crc kubenswrapper[4769]: I1125 10:09:01.701460 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-gv6vg" Nov 25 10:09:01 crc kubenswrapper[4769]: I1125 10:09:01.702216 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-gv6vg" event={"ID":"cb6debff-79c0-45cc-915d-8d40aa9a4b78","Type":"ContainerDied","Data":"337c97cac2db03c881ba0b1e21bd2e69f0fecfaed9e3ad8bfe5f221f63038516"} Nov 25 10:09:01 crc kubenswrapper[4769]: I1125 10:09:01.702296 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="337c97cac2db03c881ba0b1e21bd2e69f0fecfaed9e3ad8bfe5f221f63038516" Nov 25 10:09:01 crc kubenswrapper[4769]: I1125 10:09:01.715356 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7","Type":"ContainerStarted","Data":"3aeae9b71b5932ba56ffa4c52bd7b7401c0770ec89df7fe9ba4e13a70549dc01"} Nov 25 10:09:01 crc kubenswrapper[4769]: I1125 10:09:01.721228 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5f8bf574-f4cc-49f2-97e4-dc87f8435ebb","Type":"ContainerStarted","Data":"45017a98e570a049b2577793492b1ad8408d4c66ce111ec5bb5f67ad533d31cf"} Nov 25 10:09:01 crc kubenswrapper[4769]: I1125 10:09:01.728377 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e2a25971-ea84-4e6e-a5b8-bfad403b2c61","Type":"ContainerStarted","Data":"9c97455d1e67b0e77a0c5c3d484eaa842a1b285abec550108ab8031928619828"} Nov 25 10:09:01 crc kubenswrapper[4769]: I1125 10:09:01.728798 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="e2a25971-ea84-4e6e-a5b8-bfad403b2c61" containerName="nova-metadata-log" containerID="cri-o://e870ca18963651ffab71b6627f6f4689c14195a4b82c260d1969ecc722103ee0" gracePeriod=30 Nov 25 10:09:01 crc kubenswrapper[4769]: I1125 
Nov 25 10:09:01 crc kubenswrapper[4769]: I1125 10:09:01.745644 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=7.955660673 podStartE2EDuration="11.745624877s" podCreationTimestamp="2025-11-25 10:08:50 +0000 UTC" firstStartedPulling="2025-11-25 10:08:56.196874564 +0000 UTC m=+1484.781846877" lastFinishedPulling="2025-11-25 10:08:59.986838768 +0000 UTC m=+1488.571811081" observedRunningTime="2025-11-25 10:09:01.742331833 +0000 UTC m=+1490.327304146" watchObservedRunningTime="2025-11-25 10:09:01.745624877 +0000 UTC m=+1490.330597190"
Nov 25 10:09:01 crc kubenswrapper[4769]: I1125 10:09:01.794293 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=8.039835591 podStartE2EDuration="11.794263464s" podCreationTimestamp="2025-11-25 10:08:50 +0000 UTC" firstStartedPulling="2025-11-25 10:08:56.252288895 +0000 UTC m=+1484.837261208" lastFinishedPulling="2025-11-25 10:09:00.006716768 +0000 UTC m=+1488.591689081" observedRunningTime="2025-11-25 10:09:01.768018771 +0000 UTC m=+1490.352991084" watchObservedRunningTime="2025-11-25 10:09:01.794263464 +0000 UTC m=+1490.379235777"
Nov 25 10:09:01 crc kubenswrapper[4769]: I1125 10:09:01.812203 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Nov 25 10:09:01 crc kubenswrapper[4769]: I1125 10:09:01.841454 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-pcp4d"
Nov 25 10:09:01 crc kubenswrapper[4769]: I1125 10:09:01.918416 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-pcp4d"
Nov 25 10:09:02 crc kubenswrapper[4769]: I1125 10:09:02.107315 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pcp4d"]
Nov 25 10:09:02 crc kubenswrapper[4769]: I1125 10:09:02.774439 4769 generic.go:334] "Generic (PLEG): container finished" podID="e2a25971-ea84-4e6e-a5b8-bfad403b2c61" containerID="9c97455d1e67b0e77a0c5c3d484eaa842a1b285abec550108ab8031928619828" exitCode=0
Nov 25 10:09:02 crc kubenswrapper[4769]: I1125 10:09:02.774932 4769 generic.go:334] "Generic (PLEG): container finished" podID="e2a25971-ea84-4e6e-a5b8-bfad403b2c61" containerID="e870ca18963651ffab71b6627f6f4689c14195a4b82c260d1969ecc722103ee0" exitCode=143
Nov 25 10:09:02 crc kubenswrapper[4769]: I1125 10:09:02.775006 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e2a25971-ea84-4e6e-a5b8-bfad403b2c61","Type":"ContainerDied","Data":"9c97455d1e67b0e77a0c5c3d484eaa842a1b285abec550108ab8031928619828"}
Nov 25 10:09:02 crc kubenswrapper[4769]: I1125 10:09:02.775047 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e2a25971-ea84-4e6e-a5b8-bfad403b2c61","Type":"ContainerDied","Data":"e870ca18963651ffab71b6627f6f4689c14195a4b82c260d1969ecc722103ee0"}
Nov 25 10:09:02 crc kubenswrapper[4769]: I1125 10:09:02.798553 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7","Type":"ContainerStarted","Data":"2568c2552eb4413283b6fc1e4392130452b5df9d981d7365fc46b56b8a8ff06e"}
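
The two "Observed pod startup duration" entries above are internally consistent: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration is that figure minus the image-pull window (lastFinishedPulling minus firstStartedPulling). A short Go check that reproduces the nova-api-0 numbers from the timestamps printed in the entry; this is my reconstruction of the arithmetic, not kubelet source, and mustParse is a local helper:

    package main

    import (
    	"fmt"
    	"time"
    )

    func mustParse(s string) time.Time {
    	t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
    	if err != nil {
    		panic(err)
    	}
    	return t
    }

    func main() {
    	// Timestamps copied from the nova-api-0 entry above (monotonic
    	// "m=+..." suffixes dropped).
    	created := mustParse("2025-11-25 10:08:50 +0000 UTC")
    	firstPull := mustParse("2025-11-25 10:08:56.196874564 +0000 UTC")
    	lastPull := mustParse("2025-11-25 10:08:59.986838768 +0000 UTC")
    	watched := mustParse("2025-11-25 10:09:01.745624877 +0000 UTC")

    	e2e := watched.Sub(created)          // 11.745624877s, the E2E duration
    	slo := e2e - lastPull.Sub(firstPull) // 7.955660673s, E2E minus pull time

    	fmt.Println(e2e, slo)
    }

The same subtraction explains the nova-metadata-0 entry later in the log, where both pull timestamps are the zero time and the SLO duration therefore equals the E2E duration.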
event={"ID":"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7","Type":"ContainerStarted","Data":"2568c2552eb4413283b6fc1e4392130452b5df9d981d7365fc46b56b8a8ff06e"} Nov 25 10:09:02 crc kubenswrapper[4769]: I1125 10:09:02.814166 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 10:09:02 crc kubenswrapper[4769]: I1125 10:09:02.968364 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q7pdr\" (UniqueName: \"kubernetes.io/projected/e2a25971-ea84-4e6e-a5b8-bfad403b2c61-kube-api-access-q7pdr\") pod \"e2a25971-ea84-4e6e-a5b8-bfad403b2c61\" (UID: \"e2a25971-ea84-4e6e-a5b8-bfad403b2c61\") " Nov 25 10:09:02 crc kubenswrapper[4769]: I1125 10:09:02.968455 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e2a25971-ea84-4e6e-a5b8-bfad403b2c61-logs\") pod \"e2a25971-ea84-4e6e-a5b8-bfad403b2c61\" (UID: \"e2a25971-ea84-4e6e-a5b8-bfad403b2c61\") " Nov 25 10:09:02 crc kubenswrapper[4769]: I1125 10:09:02.968697 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2a25971-ea84-4e6e-a5b8-bfad403b2c61-config-data\") pod \"e2a25971-ea84-4e6e-a5b8-bfad403b2c61\" (UID: \"e2a25971-ea84-4e6e-a5b8-bfad403b2c61\") " Nov 25 10:09:02 crc kubenswrapper[4769]: I1125 10:09:02.968732 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2a25971-ea84-4e6e-a5b8-bfad403b2c61-combined-ca-bundle\") pod \"e2a25971-ea84-4e6e-a5b8-bfad403b2c61\" (UID: \"e2a25971-ea84-4e6e-a5b8-bfad403b2c61\") " Nov 25 10:09:02 crc kubenswrapper[4769]: I1125 10:09:02.969750 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e2a25971-ea84-4e6e-a5b8-bfad403b2c61-logs" (OuterVolumeSpecName: "logs") pod "e2a25971-ea84-4e6e-a5b8-bfad403b2c61" (UID: "e2a25971-ea84-4e6e-a5b8-bfad403b2c61"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:09:02 crc kubenswrapper[4769]: I1125 10:09:02.982265 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e2a25971-ea84-4e6e-a5b8-bfad403b2c61-kube-api-access-q7pdr" (OuterVolumeSpecName: "kube-api-access-q7pdr") pod "e2a25971-ea84-4e6e-a5b8-bfad403b2c61" (UID: "e2a25971-ea84-4e6e-a5b8-bfad403b2c61"). InnerVolumeSpecName "kube-api-access-q7pdr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:09:03 crc kubenswrapper[4769]: I1125 10:09:03.020092 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e2a25971-ea84-4e6e-a5b8-bfad403b2c61-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e2a25971-ea84-4e6e-a5b8-bfad403b2c61" (UID: "e2a25971-ea84-4e6e-a5b8-bfad403b2c61"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:09:03 crc kubenswrapper[4769]: I1125 10:09:03.048260 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e2a25971-ea84-4e6e-a5b8-bfad403b2c61-config-data" (OuterVolumeSpecName: "config-data") pod "e2a25971-ea84-4e6e-a5b8-bfad403b2c61" (UID: "e2a25971-ea84-4e6e-a5b8-bfad403b2c61"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:09:03 crc kubenswrapper[4769]: I1125 10:09:03.078909 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e2a25971-ea84-4e6e-a5b8-bfad403b2c61-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:03 crc kubenswrapper[4769]: I1125 10:09:03.079473 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2a25971-ea84-4e6e-a5b8-bfad403b2c61-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:03 crc kubenswrapper[4769]: I1125 10:09:03.079503 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q7pdr\" (UniqueName: \"kubernetes.io/projected/e2a25971-ea84-4e6e-a5b8-bfad403b2c61-kube-api-access-q7pdr\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:03 crc kubenswrapper[4769]: I1125 10:09:03.079518 4769 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e2a25971-ea84-4e6e-a5b8-bfad403b2c61-logs\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:03 crc kubenswrapper[4769]: I1125 10:09:03.863835 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7","Type":"ContainerStarted","Data":"33c538311f856ad3fdb6b4fcb1f7ddae812c9c0f6a75d5b569f0f9aec3f9be23"} Nov 25 10:09:03 crc kubenswrapper[4769]: I1125 10:09:03.868467 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 10:09:03 crc kubenswrapper[4769]: I1125 10:09:03.877035 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e2a25971-ea84-4e6e-a5b8-bfad403b2c61","Type":"ContainerDied","Data":"a1009c8610d8e673af71f8ea0ae4d9d9e1b6ce41282281d441e188d2f6e38df7"} Nov 25 10:09:03 crc kubenswrapper[4769]: I1125 10:09:03.877268 4769 scope.go:117] "RemoveContainer" containerID="9c97455d1e67b0e77a0c5c3d484eaa842a1b285abec550108ab8031928619828" Nov 25 10:09:03 crc kubenswrapper[4769]: I1125 10:09:03.877618 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-pcp4d" podUID="7937ea4d-23ba-4f0a-af90-1257f8592d1e" containerName="registry-server" containerID="cri-o://ffa25a35b74ad97be14908cef48e22c3ab33399855d6036e64d903a2ea91d835" gracePeriod=2 Nov 25 10:09:03 crc kubenswrapper[4769]: I1125 10:09:03.931308 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 10:09:03 crc kubenswrapper[4769]: I1125 10:09:03.951969 4769 scope.go:117] "RemoveContainer" containerID="e870ca18963651ffab71b6627f6f4689c14195a4b82c260d1969ecc722103ee0" Nov 25 10:09:03 crc kubenswrapper[4769]: I1125 10:09:03.980716 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 10:09:03 crc kubenswrapper[4769]: I1125 10:09:03.994152 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 25 10:09:03 crc kubenswrapper[4769]: E1125 10:09:03.998779 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb6debff-79c0-45cc-915d-8d40aa9a4b78" containerName="aodh-db-sync" Nov 25 10:09:03 crc kubenswrapper[4769]: I1125 10:09:03.998809 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb6debff-79c0-45cc-915d-8d40aa9a4b78" containerName="aodh-db-sync" Nov 25 10:09:03 crc kubenswrapper[4769]: E1125 10:09:03.998823 4769 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="e2a25971-ea84-4e6e-a5b8-bfad403b2c61" containerName="nova-metadata-log" Nov 25 10:09:03 crc kubenswrapper[4769]: I1125 10:09:03.998832 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2a25971-ea84-4e6e-a5b8-bfad403b2c61" containerName="nova-metadata-log" Nov 25 10:09:03 crc kubenswrapper[4769]: E1125 10:09:03.998843 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2a25971-ea84-4e6e-a5b8-bfad403b2c61" containerName="nova-metadata-metadata" Nov 25 10:09:03 crc kubenswrapper[4769]: I1125 10:09:03.998852 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2a25971-ea84-4e6e-a5b8-bfad403b2c61" containerName="nova-metadata-metadata" Nov 25 10:09:03 crc kubenswrapper[4769]: I1125 10:09:03.999132 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="e2a25971-ea84-4e6e-a5b8-bfad403b2c61" containerName="nova-metadata-log" Nov 25 10:09:03 crc kubenswrapper[4769]: I1125 10:09:03.999142 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb6debff-79c0-45cc-915d-8d40aa9a4b78" containerName="aodh-db-sync" Nov 25 10:09:03 crc kubenswrapper[4769]: I1125 10:09:03.999148 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="e2a25971-ea84-4e6e-a5b8-bfad403b2c61" containerName="nova-metadata-metadata" Nov 25 10:09:04 crc kubenswrapper[4769]: I1125 10:09:04.000514 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 10:09:04 crc kubenswrapper[4769]: I1125 10:09:04.007435 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 25 10:09:04 crc kubenswrapper[4769]: I1125 10:09:04.007707 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 25 10:09:04 crc kubenswrapper[4769]: I1125 10:09:04.017912 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 10:09:04 crc kubenswrapper[4769]: I1125 10:09:04.130484 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4-config-data\") pod \"nova-metadata-0\" (UID: \"c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4\") " pod="openstack/nova-metadata-0" Nov 25 10:09:04 crc kubenswrapper[4769]: I1125 10:09:04.130535 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4-logs\") pod \"nova-metadata-0\" (UID: \"c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4\") " pod="openstack/nova-metadata-0" Nov 25 10:09:04 crc kubenswrapper[4769]: I1125 10:09:04.130558 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4\") " pod="openstack/nova-metadata-0" Nov 25 10:09:04 crc kubenswrapper[4769]: I1125 10:09:04.130669 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4\") " pod="openstack/nova-metadata-0" Nov 25 10:09:04 crc 
kubenswrapper[4769]: I1125 10:09:04.130768 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nlw4m\" (UniqueName: \"kubernetes.io/projected/c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4-kube-api-access-nlw4m\") pod \"nova-metadata-0\" (UID: \"c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4\") " pod="openstack/nova-metadata-0" Nov 25 10:09:04 crc kubenswrapper[4769]: I1125 10:09:04.240915 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nlw4m\" (UniqueName: \"kubernetes.io/projected/c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4-kube-api-access-nlw4m\") pod \"nova-metadata-0\" (UID: \"c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4\") " pod="openstack/nova-metadata-0" Nov 25 10:09:04 crc kubenswrapper[4769]: I1125 10:09:04.241232 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4-config-data\") pod \"nova-metadata-0\" (UID: \"c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4\") " pod="openstack/nova-metadata-0" Nov 25 10:09:04 crc kubenswrapper[4769]: I1125 10:09:04.241268 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4-logs\") pod \"nova-metadata-0\" (UID: \"c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4\") " pod="openstack/nova-metadata-0" Nov 25 10:09:04 crc kubenswrapper[4769]: I1125 10:09:04.241297 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4\") " pod="openstack/nova-metadata-0" Nov 25 10:09:04 crc kubenswrapper[4769]: I1125 10:09:04.241483 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4\") " pod="openstack/nova-metadata-0" Nov 25 10:09:04 crc kubenswrapper[4769]: I1125 10:09:04.241978 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4-logs\") pod \"nova-metadata-0\" (UID: \"c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4\") " pod="openstack/nova-metadata-0" Nov 25 10:09:04 crc kubenswrapper[4769]: I1125 10:09:04.251039 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4-config-data\") pod \"nova-metadata-0\" (UID: \"c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4\") " pod="openstack/nova-metadata-0" Nov 25 10:09:04 crc kubenswrapper[4769]: I1125 10:09:04.252068 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4\") " pod="openstack/nova-metadata-0" Nov 25 10:09:04 crc kubenswrapper[4769]: I1125 10:09:04.252775 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: 
\"c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4\") " pod="openstack/nova-metadata-0" Nov 25 10:09:04 crc kubenswrapper[4769]: I1125 10:09:04.269186 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nlw4m\" (UniqueName: \"kubernetes.io/projected/c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4-kube-api-access-nlw4m\") pod \"nova-metadata-0\" (UID: \"c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4\") " pod="openstack/nova-metadata-0" Nov 25 10:09:04 crc kubenswrapper[4769]: I1125 10:09:04.280351 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e2a25971-ea84-4e6e-a5b8-bfad403b2c61" path="/var/lib/kubelet/pods/e2a25971-ea84-4e6e-a5b8-bfad403b2c61/volumes" Nov 25 10:09:04 crc kubenswrapper[4769]: I1125 10:09:04.415627 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 10:09:04 crc kubenswrapper[4769]: I1125 10:09:04.586729 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pcp4d" Nov 25 10:09:04 crc kubenswrapper[4769]: I1125 10:09:04.691192 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7937ea4d-23ba-4f0a-af90-1257f8592d1e-catalog-content\") pod \"7937ea4d-23ba-4f0a-af90-1257f8592d1e\" (UID: \"7937ea4d-23ba-4f0a-af90-1257f8592d1e\") " Nov 25 10:09:04 crc kubenswrapper[4769]: I1125 10:09:04.691692 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kpsd8\" (UniqueName: \"kubernetes.io/projected/7937ea4d-23ba-4f0a-af90-1257f8592d1e-kube-api-access-kpsd8\") pod \"7937ea4d-23ba-4f0a-af90-1257f8592d1e\" (UID: \"7937ea4d-23ba-4f0a-af90-1257f8592d1e\") " Nov 25 10:09:04 crc kubenswrapper[4769]: I1125 10:09:04.691763 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7937ea4d-23ba-4f0a-af90-1257f8592d1e-utilities\") pod \"7937ea4d-23ba-4f0a-af90-1257f8592d1e\" (UID: \"7937ea4d-23ba-4f0a-af90-1257f8592d1e\") " Nov 25 10:09:04 crc kubenswrapper[4769]: I1125 10:09:04.692553 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7937ea4d-23ba-4f0a-af90-1257f8592d1e-utilities" (OuterVolumeSpecName: "utilities") pod "7937ea4d-23ba-4f0a-af90-1257f8592d1e" (UID: "7937ea4d-23ba-4f0a-af90-1257f8592d1e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:09:04 crc kubenswrapper[4769]: I1125 10:09:04.693996 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7937ea4d-23ba-4f0a-af90-1257f8592d1e-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:04 crc kubenswrapper[4769]: I1125 10:09:04.702499 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7937ea4d-23ba-4f0a-af90-1257f8592d1e-kube-api-access-kpsd8" (OuterVolumeSpecName: "kube-api-access-kpsd8") pod "7937ea4d-23ba-4f0a-af90-1257f8592d1e" (UID: "7937ea4d-23ba-4f0a-af90-1257f8592d1e"). InnerVolumeSpecName "kube-api-access-kpsd8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:09:04 crc kubenswrapper[4769]: I1125 10:09:04.790694 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7937ea4d-23ba-4f0a-af90-1257f8592d1e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7937ea4d-23ba-4f0a-af90-1257f8592d1e" (UID: "7937ea4d-23ba-4f0a-af90-1257f8592d1e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:09:04 crc kubenswrapper[4769]: I1125 10:09:04.796320 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7937ea4d-23ba-4f0a-af90-1257f8592d1e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:04 crc kubenswrapper[4769]: I1125 10:09:04.796360 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kpsd8\" (UniqueName: \"kubernetes.io/projected/7937ea4d-23ba-4f0a-af90-1257f8592d1e-kube-api-access-kpsd8\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:04 crc kubenswrapper[4769]: I1125 10:09:04.884668 4769 generic.go:334] "Generic (PLEG): container finished" podID="7937ea4d-23ba-4f0a-af90-1257f8592d1e" containerID="ffa25a35b74ad97be14908cef48e22c3ab33399855d6036e64d903a2ea91d835" exitCode=0 Nov 25 10:09:04 crc kubenswrapper[4769]: I1125 10:09:04.885216 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pcp4d" event={"ID":"7937ea4d-23ba-4f0a-af90-1257f8592d1e","Type":"ContainerDied","Data":"ffa25a35b74ad97be14908cef48e22c3ab33399855d6036e64d903a2ea91d835"} Nov 25 10:09:04 crc kubenswrapper[4769]: I1125 10:09:04.885250 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pcp4d" event={"ID":"7937ea4d-23ba-4f0a-af90-1257f8592d1e","Type":"ContainerDied","Data":"26748e253416a88d899e5e60be4183719bf0584426482b9063faa81e2831dd84"} Nov 25 10:09:04 crc kubenswrapper[4769]: I1125 10:09:04.885438 4769 scope.go:117] "RemoveContainer" containerID="ffa25a35b74ad97be14908cef48e22c3ab33399855d6036e64d903a2ea91d835" Nov 25 10:09:04 crc kubenswrapper[4769]: I1125 10:09:04.885626 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-pcp4d" Nov 25 10:09:04 crc kubenswrapper[4769]: I1125 10:09:04.925903 4769 scope.go:117] "RemoveContainer" containerID="5e547c7188a2ff86bc04a6c7ba2fd7f0d41e3889398b6c5765ab37e6b3ae12d5" Nov 25 10:09:04 crc kubenswrapper[4769]: I1125 10:09:04.935772 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pcp4d"] Nov 25 10:09:04 crc kubenswrapper[4769]: I1125 10:09:04.965818 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-pcp4d"] Nov 25 10:09:04 crc kubenswrapper[4769]: I1125 10:09:04.966428 4769 scope.go:117] "RemoveContainer" containerID="0bb6171a3bdc25ad1d6b7441d741418c4d6076094607cdccd2715f2368cabb44" Nov 25 10:09:04 crc kubenswrapper[4769]: W1125 10:09:04.975559 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc5e4001a_e8bc_4aa9_97a7_1e4f6a2d2aa4.slice/crio-fbcc6dc78e5f29254c662250da77b60dc0532505ae6011f2b364f95bf54ce583 WatchSource:0}: Error finding container fbcc6dc78e5f29254c662250da77b60dc0532505ae6011f2b364f95bf54ce583: Status 404 returned error can't find the container with id fbcc6dc78e5f29254c662250da77b60dc0532505ae6011f2b364f95bf54ce583 Nov 25 10:09:04 crc kubenswrapper[4769]: I1125 10:09:04.980406 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 10:09:05 crc kubenswrapper[4769]: I1125 10:09:05.035112 4769 scope.go:117] "RemoveContainer" containerID="ffa25a35b74ad97be14908cef48e22c3ab33399855d6036e64d903a2ea91d835" Nov 25 10:09:05 crc kubenswrapper[4769]: E1125 10:09:05.038364 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ffa25a35b74ad97be14908cef48e22c3ab33399855d6036e64d903a2ea91d835\": container with ID starting with ffa25a35b74ad97be14908cef48e22c3ab33399855d6036e64d903a2ea91d835 not found: ID does not exist" containerID="ffa25a35b74ad97be14908cef48e22c3ab33399855d6036e64d903a2ea91d835" Nov 25 10:09:05 crc kubenswrapper[4769]: I1125 10:09:05.038414 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ffa25a35b74ad97be14908cef48e22c3ab33399855d6036e64d903a2ea91d835"} err="failed to get container status \"ffa25a35b74ad97be14908cef48e22c3ab33399855d6036e64d903a2ea91d835\": rpc error: code = NotFound desc = could not find container \"ffa25a35b74ad97be14908cef48e22c3ab33399855d6036e64d903a2ea91d835\": container with ID starting with ffa25a35b74ad97be14908cef48e22c3ab33399855d6036e64d903a2ea91d835 not found: ID does not exist" Nov 25 10:09:05 crc kubenswrapper[4769]: I1125 10:09:05.038442 4769 scope.go:117] "RemoveContainer" containerID="5e547c7188a2ff86bc04a6c7ba2fd7f0d41e3889398b6c5765ab37e6b3ae12d5" Nov 25 10:09:05 crc kubenswrapper[4769]: E1125 10:09:05.038760 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5e547c7188a2ff86bc04a6c7ba2fd7f0d41e3889398b6c5765ab37e6b3ae12d5\": container with ID starting with 5e547c7188a2ff86bc04a6c7ba2fd7f0d41e3889398b6c5765ab37e6b3ae12d5 not found: ID does not exist" containerID="5e547c7188a2ff86bc04a6c7ba2fd7f0d41e3889398b6c5765ab37e6b3ae12d5" Nov 25 10:09:05 crc kubenswrapper[4769]: I1125 10:09:05.038781 4769 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"5e547c7188a2ff86bc04a6c7ba2fd7f0d41e3889398b6c5765ab37e6b3ae12d5"} err="failed to get container status \"5e547c7188a2ff86bc04a6c7ba2fd7f0d41e3889398b6c5765ab37e6b3ae12d5\": rpc error: code = NotFound desc = could not find container \"5e547c7188a2ff86bc04a6c7ba2fd7f0d41e3889398b6c5765ab37e6b3ae12d5\": container with ID starting with 5e547c7188a2ff86bc04a6c7ba2fd7f0d41e3889398b6c5765ab37e6b3ae12d5 not found: ID does not exist" Nov 25 10:09:05 crc kubenswrapper[4769]: I1125 10:09:05.038804 4769 scope.go:117] "RemoveContainer" containerID="0bb6171a3bdc25ad1d6b7441d741418c4d6076094607cdccd2715f2368cabb44" Nov 25 10:09:05 crc kubenswrapper[4769]: E1125 10:09:05.039242 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0bb6171a3bdc25ad1d6b7441d741418c4d6076094607cdccd2715f2368cabb44\": container with ID starting with 0bb6171a3bdc25ad1d6b7441d741418c4d6076094607cdccd2715f2368cabb44 not found: ID does not exist" containerID="0bb6171a3bdc25ad1d6b7441d741418c4d6076094607cdccd2715f2368cabb44" Nov 25 10:09:05 crc kubenswrapper[4769]: I1125 10:09:05.039267 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0bb6171a3bdc25ad1d6b7441d741418c4d6076094607cdccd2715f2368cabb44"} err="failed to get container status \"0bb6171a3bdc25ad1d6b7441d741418c4d6076094607cdccd2715f2368cabb44\": rpc error: code = NotFound desc = could not find container \"0bb6171a3bdc25ad1d6b7441d741418c4d6076094607cdccd2715f2368cabb44\": container with ID starting with 0bb6171a3bdc25ad1d6b7441d741418c4d6076094607cdccd2715f2368cabb44 not found: ID does not exist" Nov 25 10:09:05 crc kubenswrapper[4769]: I1125 10:09:05.559839 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Nov 25 10:09:05 crc kubenswrapper[4769]: E1125 10:09:05.560608 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7937ea4d-23ba-4f0a-af90-1257f8592d1e" containerName="registry-server" Nov 25 10:09:05 crc kubenswrapper[4769]: I1125 10:09:05.560631 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="7937ea4d-23ba-4f0a-af90-1257f8592d1e" containerName="registry-server" Nov 25 10:09:05 crc kubenswrapper[4769]: E1125 10:09:05.560703 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7937ea4d-23ba-4f0a-af90-1257f8592d1e" containerName="extract-content" Nov 25 10:09:05 crc kubenswrapper[4769]: I1125 10:09:05.560712 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="7937ea4d-23ba-4f0a-af90-1257f8592d1e" containerName="extract-content" Nov 25 10:09:05 crc kubenswrapper[4769]: E1125 10:09:05.560725 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7937ea4d-23ba-4f0a-af90-1257f8592d1e" containerName="extract-utilities" Nov 25 10:09:05 crc kubenswrapper[4769]: I1125 10:09:05.560733 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="7937ea4d-23ba-4f0a-af90-1257f8592d1e" containerName="extract-utilities" Nov 25 10:09:05 crc kubenswrapper[4769]: I1125 10:09:05.561195 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="7937ea4d-23ba-4f0a-af90-1257f8592d1e" containerName="registry-server" Nov 25 10:09:05 crc kubenswrapper[4769]: I1125 10:09:05.566428 4769 util.go:30] "No sandbox for pod can be found. 
Nov 25 10:09:05 crc kubenswrapper[4769]: I1125 10:09:05.569311 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts"
Nov 25 10:09:05 crc kubenswrapper[4769]: I1125 10:09:05.569423 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data"
Nov 25 10:09:05 crc kubenswrapper[4769]: I1125 10:09:05.569542 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-5nnz9"
Nov 25 10:09:05 crc kubenswrapper[4769]: I1125 10:09:05.600315 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"]
Nov 25 10:09:05 crc kubenswrapper[4769]: I1125 10:09:05.672778 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33335ac3-fa4e-4a44-89b7-71083bcf7930-combined-ca-bundle\") pod \"aodh-0\" (UID: \"33335ac3-fa4e-4a44-89b7-71083bcf7930\") " pod="openstack/aodh-0"
Nov 25 10:09:05 crc kubenswrapper[4769]: I1125 10:09:05.672853 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9r4v4\" (UniqueName: \"kubernetes.io/projected/33335ac3-fa4e-4a44-89b7-71083bcf7930-kube-api-access-9r4v4\") pod \"aodh-0\" (UID: \"33335ac3-fa4e-4a44-89b7-71083bcf7930\") " pod="openstack/aodh-0"
Nov 25 10:09:05 crc kubenswrapper[4769]: I1125 10:09:05.673402 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33335ac3-fa4e-4a44-89b7-71083bcf7930-config-data\") pod \"aodh-0\" (UID: \"33335ac3-fa4e-4a44-89b7-71083bcf7930\") " pod="openstack/aodh-0"
Nov 25 10:09:05 crc kubenswrapper[4769]: I1125 10:09:05.673753 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/33335ac3-fa4e-4a44-89b7-71083bcf7930-scripts\") pod \"aodh-0\" (UID: \"33335ac3-fa4e-4a44-89b7-71083bcf7930\") " pod="openstack/aodh-0"
Nov 25 10:09:05 crc kubenswrapper[4769]: I1125 10:09:05.778616 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/33335ac3-fa4e-4a44-89b7-71083bcf7930-scripts\") pod \"aodh-0\" (UID: \"33335ac3-fa4e-4a44-89b7-71083bcf7930\") " pod="openstack/aodh-0"
Nov 25 10:09:05 crc kubenswrapper[4769]: I1125 10:09:05.780440 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33335ac3-fa4e-4a44-89b7-71083bcf7930-combined-ca-bundle\") pod \"aodh-0\" (UID: \"33335ac3-fa4e-4a44-89b7-71083bcf7930\") " pod="openstack/aodh-0"
Nov 25 10:09:05 crc kubenswrapper[4769]: I1125 10:09:05.780508 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9r4v4\" (UniqueName: \"kubernetes.io/projected/33335ac3-fa4e-4a44-89b7-71083bcf7930-kube-api-access-9r4v4\") pod \"aodh-0\" (UID: \"33335ac3-fa4e-4a44-89b7-71083bcf7930\") " pod="openstack/aodh-0"
Nov 25 10:09:05 crc kubenswrapper[4769]: I1125 10:09:05.780673 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33335ac3-fa4e-4a44-89b7-71083bcf7930-config-data\") pod \"aodh-0\" (UID: \"33335ac3-fa4e-4a44-89b7-71083bcf7930\") " pod="openstack/aodh-0"
Nov 25 10:09:05 crc kubenswrapper[4769]: I1125 10:09:05.783931 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/33335ac3-fa4e-4a44-89b7-71083bcf7930-scripts\") pod \"aodh-0\" (UID: \"33335ac3-fa4e-4a44-89b7-71083bcf7930\") " pod="openstack/aodh-0"
Nov 25 10:09:05 crc kubenswrapper[4769]: I1125 10:09:05.786606 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33335ac3-fa4e-4a44-89b7-71083bcf7930-config-data\") pod \"aodh-0\" (UID: \"33335ac3-fa4e-4a44-89b7-71083bcf7930\") " pod="openstack/aodh-0"
Nov 25 10:09:05 crc kubenswrapper[4769]: I1125 10:09:05.788748 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33335ac3-fa4e-4a44-89b7-71083bcf7930-combined-ca-bundle\") pod \"aodh-0\" (UID: \"33335ac3-fa4e-4a44-89b7-71083bcf7930\") " pod="openstack/aodh-0"
Nov 25 10:09:05 crc kubenswrapper[4769]: I1125 10:09:05.811641 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9r4v4\" (UniqueName: \"kubernetes.io/projected/33335ac3-fa4e-4a44-89b7-71083bcf7930-kube-api-access-9r4v4\") pod \"aodh-0\" (UID: \"33335ac3-fa4e-4a44-89b7-71083bcf7930\") " pod="openstack/aodh-0"
Nov 25 10:09:05 crc kubenswrapper[4769]: I1125 10:09:05.908315 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7","Type":"ContainerStarted","Data":"c293999bf19c2c03f3b1ede2528f334bcf427233bfced41a317c3b4f1fff5647"}
Nov 25 10:09:05 crc kubenswrapper[4769]: I1125 10:09:05.910247 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 25 10:09:05 crc kubenswrapper[4769]: I1125 10:09:05.915913 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4","Type":"ContainerStarted","Data":"a1ad1b43aee177a2d262fa0522d2d1c326f0bd1a5d0dbca1571445337572cd1e"}
Nov 25 10:09:05 crc kubenswrapper[4769]: I1125 10:09:05.916006 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4","Type":"ContainerStarted","Data":"3cd8e97579b9f23560f2f00166785db754d9cf1ffbd4d80c9acc0958c294713d"}
Nov 25 10:09:05 crc kubenswrapper[4769]: I1125 10:09:05.916044 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4","Type":"ContainerStarted","Data":"fbcc6dc78e5f29254c662250da77b60dc0532505ae6011f2b364f95bf54ce583"}
Nov 25 10:09:05 crc kubenswrapper[4769]: I1125 10:09:05.943268 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.812555984 podStartE2EDuration="7.943247815s" podCreationTimestamp="2025-11-25 10:08:58 +0000 UTC" firstStartedPulling="2025-11-25 10:09:00.56447321 +0000 UTC m=+1489.149445513" lastFinishedPulling="2025-11-25 10:09:04.695165031 +0000 UTC m=+1493.280137344" observedRunningTime="2025-11-25 10:09:05.934118161 +0000 UTC m=+1494.519090474" watchObservedRunningTime="2025-11-25 10:09:05.943247815 +0000 UTC m=+1494.528220128"
Nov 25 10:09:05 crc kubenswrapper[4769]: I1125 10:09:05.954435 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0"
Nov 25 10:09:05 crc kubenswrapper[4769]: I1125 10:09:05.967762 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.967738053 podStartE2EDuration="2.967738053s" podCreationTimestamp="2025-11-25 10:09:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:09:05.951400424 +0000 UTC m=+1494.536372737" watchObservedRunningTime="2025-11-25 10:09:05.967738053 +0000 UTC m=+1494.552710366"
Nov 25 10:09:06 crc kubenswrapper[4769]: I1125 10:09:06.259727 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7937ea4d-23ba-4f0a-af90-1257f8592d1e" path="/var/lib/kubelet/pods/7937ea4d-23ba-4f0a-af90-1257f8592d1e/volumes"
Nov 25 10:09:07 crc kubenswrapper[4769]: I1125 10:09:06.372114 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5fbc4d444f-tgxvk"
Nov 25 10:09:07 crc kubenswrapper[4769]: I1125 10:09:06.560261 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-tq8xm"]
Nov 25 10:09:07 crc kubenswrapper[4769]: I1125 10:09:06.560548 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-f6bc4c6c9-tq8xm" podUID="a8c0623e-d0b4-4495-a44c-07a1331dcc0f" containerName="dnsmasq-dns" containerID="cri-o://54e4e54498971d5c9fb360958369f829c428201971d966116c3768f99ce31358" gracePeriod=10
Nov 25 10:09:07 crc kubenswrapper[4769]: I1125 10:09:06.635831 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"]
Nov 25 10:09:07 crc kubenswrapper[4769]: I1125 10:09:06.974913 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"33335ac3-fa4e-4a44-89b7-71083bcf7930","Type":"ContainerStarted","Data":"d4d1c69d86123e49c9f408cc10dc0d8db15e112a622aeff35d69b31af9002784"}
Nov 25 10:09:07 crc kubenswrapper[4769]: I1125 10:09:07.010156 4769 generic.go:334] "Generic (PLEG): container finished" podID="a8c0623e-d0b4-4495-a44c-07a1331dcc0f" containerID="54e4e54498971d5c9fb360958369f829c428201971d966116c3768f99ce31358" exitCode=0
Nov 25 10:09:07 crc kubenswrapper[4769]: I1125 10:09:07.010247 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6bc4c6c9-tq8xm" event={"ID":"a8c0623e-d0b4-4495-a44c-07a1331dcc0f","Type":"ContainerDied","Data":"54e4e54498971d5c9fb360958369f829c428201971d966116c3768f99ce31358"}
Nov 25 10:09:07 crc kubenswrapper[4769]: I1125 10:09:07.019832 4769 generic.go:334] "Generic (PLEG): container finished" podID="bd25577c-f800-4119-8e6f-20258030dfbc" containerID="4a020519aefc75cb906bbe26de0f7c58f76aab7f5eb5671179e31ca468fc1a6e" exitCode=0
Nov 25 10:09:07 crc kubenswrapper[4769]: I1125 10:09:07.020261 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-w6f7r" event={"ID":"bd25577c-f800-4119-8e6f-20258030dfbc","Type":"ContainerDied","Data":"4a020519aefc75cb906bbe26de0f7c58f76aab7f5eb5671179e31ca468fc1a6e"}
Nov 25 10:09:07 crc kubenswrapper[4769]: I1125 10:09:07.711996 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f6bc4c6c9-tq8xm"
Nov 25 10:09:07 crc kubenswrapper[4769]: I1125 10:09:07.868049 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a8c0623e-d0b4-4495-a44c-07a1331dcc0f-dns-svc\") pod \"a8c0623e-d0b4-4495-a44c-07a1331dcc0f\" (UID: \"a8c0623e-d0b4-4495-a44c-07a1331dcc0f\") "
Nov 25 10:09:07 crc kubenswrapper[4769]: I1125 10:09:07.869451 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a8c0623e-d0b4-4495-a44c-07a1331dcc0f-config\") pod \"a8c0623e-d0b4-4495-a44c-07a1331dcc0f\" (UID: \"a8c0623e-d0b4-4495-a44c-07a1331dcc0f\") "
Nov 25 10:09:07 crc kubenswrapper[4769]: I1125 10:09:07.869660 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a8c0623e-d0b4-4495-a44c-07a1331dcc0f-dns-swift-storage-0\") pod \"a8c0623e-d0b4-4495-a44c-07a1331dcc0f\" (UID: \"a8c0623e-d0b4-4495-a44c-07a1331dcc0f\") "
Nov 25 10:09:07 crc kubenswrapper[4769]: I1125 10:09:07.869865 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a8c0623e-d0b4-4495-a44c-07a1331dcc0f-ovsdbserver-nb\") pod \"a8c0623e-d0b4-4495-a44c-07a1331dcc0f\" (UID: \"a8c0623e-d0b4-4495-a44c-07a1331dcc0f\") "
Nov 25 10:09:07 crc kubenswrapper[4769]: I1125 10:09:07.870002 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-grtd6\" (UniqueName: \"kubernetes.io/projected/a8c0623e-d0b4-4495-a44c-07a1331dcc0f-kube-api-access-grtd6\") pod \"a8c0623e-d0b4-4495-a44c-07a1331dcc0f\" (UID: \"a8c0623e-d0b4-4495-a44c-07a1331dcc0f\") "
Nov 25 10:09:07 crc kubenswrapper[4769]: I1125 10:09:07.870298 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a8c0623e-d0b4-4495-a44c-07a1331dcc0f-ovsdbserver-sb\") pod \"a8c0623e-d0b4-4495-a44c-07a1331dcc0f\" (UID: \"a8c0623e-d0b4-4495-a44c-07a1331dcc0f\") "
Nov 25 10:09:07 crc kubenswrapper[4769]: I1125 10:09:07.933419 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a8c0623e-d0b4-4495-a44c-07a1331dcc0f-kube-api-access-grtd6" (OuterVolumeSpecName: "kube-api-access-grtd6") pod "a8c0623e-d0b4-4495-a44c-07a1331dcc0f" (UID: "a8c0623e-d0b4-4495-a44c-07a1331dcc0f"). InnerVolumeSpecName "kube-api-access-grtd6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:09:07 crc kubenswrapper[4769]: I1125 10:09:07.964837 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a8c0623e-d0b4-4495-a44c-07a1331dcc0f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a8c0623e-d0b4-4495-a44c-07a1331dcc0f" (UID: "a8c0623e-d0b4-4495-a44c-07a1331dcc0f"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:09:07 crc kubenswrapper[4769]: I1125 10:09:07.972830 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-grtd6\" (UniqueName: \"kubernetes.io/projected/a8c0623e-d0b4-4495-a44c-07a1331dcc0f-kube-api-access-grtd6\") on node \"crc\" DevicePath \"\""
Nov 25 10:09:07 crc kubenswrapper[4769]: I1125 10:09:07.972863 4769 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a8c0623e-d0b4-4495-a44c-07a1331dcc0f-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 25 10:09:08 crc kubenswrapper[4769]: I1125 10:09:08.027742 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a8c0623e-d0b4-4495-a44c-07a1331dcc0f-config" (OuterVolumeSpecName: "config") pod "a8c0623e-d0b4-4495-a44c-07a1331dcc0f" (UID: "a8c0623e-d0b4-4495-a44c-07a1331dcc0f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:09:08 crc kubenswrapper[4769]: I1125 10:09:08.061464 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"33335ac3-fa4e-4a44-89b7-71083bcf7930","Type":"ContainerStarted","Data":"338a8423adc3403c6e5ee2124377a3e2a9e45462d9c3952d90d353ce929ed541"}
Nov 25 10:09:08 crc kubenswrapper[4769]: I1125 10:09:08.062741 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a8c0623e-d0b4-4495-a44c-07a1331dcc0f-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "a8c0623e-d0b4-4495-a44c-07a1331dcc0f" (UID: "a8c0623e-d0b4-4495-a44c-07a1331dcc0f"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:09:08 crc kubenswrapper[4769]: I1125 10:09:08.063902 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f6bc4c6c9-tq8xm" event={"ID":"a8c0623e-d0b4-4495-a44c-07a1331dcc0f","Type":"ContainerDied","Data":"cb288b65ac7f6d13208ce78626170cf95fef4336ac70a8301459f9410404d72e"}
Nov 25 10:09:08 crc kubenswrapper[4769]: I1125 10:09:08.063973 4769 scope.go:117] "RemoveContainer" containerID="54e4e54498971d5c9fb360958369f829c428201971d966116c3768f99ce31358"
Nov 25 10:09:08 crc kubenswrapper[4769]: I1125 10:09:08.064199 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f6bc4c6c9-tq8xm"
Nov 25 10:09:08 crc kubenswrapper[4769]: I1125 10:09:08.068769 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a8c0623e-d0b4-4495-a44c-07a1331dcc0f-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "a8c0623e-d0b4-4495-a44c-07a1331dcc0f" (UID: "a8c0623e-d0b4-4495-a44c-07a1331dcc0f"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:09:08 crc kubenswrapper[4769]: I1125 10:09:08.075503 4769 generic.go:334] "Generic (PLEG): container finished" podID="8a7752ba-7e13-4f6c-af46-5df66ad668e3" containerID="509a1cc9c9e190c94b3b3647dac50326f2abe9ea6fab6c5352c013fd3308fe47" exitCode=0 Nov 25 10:09:08 crc kubenswrapper[4769]: I1125 10:09:08.075612 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-xsqz9" event={"ID":"8a7752ba-7e13-4f6c-af46-5df66ad668e3","Type":"ContainerDied","Data":"509a1cc9c9e190c94b3b3647dac50326f2abe9ea6fab6c5352c013fd3308fe47"} Nov 25 10:09:08 crc kubenswrapper[4769]: I1125 10:09:08.078103 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a8c0623e-d0b4-4495-a44c-07a1331dcc0f-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:08 crc kubenswrapper[4769]: I1125 10:09:08.078132 4769 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a8c0623e-d0b4-4495-a44c-07a1331dcc0f-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:08 crc kubenswrapper[4769]: I1125 10:09:08.078142 4769 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a8c0623e-d0b4-4495-a44c-07a1331dcc0f-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:08 crc kubenswrapper[4769]: I1125 10:09:08.081112 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a8c0623e-d0b4-4495-a44c-07a1331dcc0f-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "a8c0623e-d0b4-4495-a44c-07a1331dcc0f" (UID: "a8c0623e-d0b4-4495-a44c-07a1331dcc0f"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:09:08 crc kubenswrapper[4769]: I1125 10:09:08.102583 4769 scope.go:117] "RemoveContainer" containerID="4142dfd1c74973e1732d6bfdfe2fcd10753e7a027e8a7cc8f80dce4f2cd0cbfd" Nov 25 10:09:08 crc kubenswrapper[4769]: I1125 10:09:08.180798 4769 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a8c0623e-d0b4-4495-a44c-07a1331dcc0f-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:08 crc kubenswrapper[4769]: I1125 10:09:08.414341 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-tq8xm"] Nov 25 10:09:08 crc kubenswrapper[4769]: I1125 10:09:08.439405 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-f6bc4c6c9-tq8xm"] Nov 25 10:09:08 crc kubenswrapper[4769]: I1125 10:09:08.542890 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-w6f7r" Nov 25 10:09:08 crc kubenswrapper[4769]: I1125 10:09:08.593730 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd25577c-f800-4119-8e6f-20258030dfbc-scripts\") pod \"bd25577c-f800-4119-8e6f-20258030dfbc\" (UID: \"bd25577c-f800-4119-8e6f-20258030dfbc\") " Nov 25 10:09:08 crc kubenswrapper[4769]: I1125 10:09:08.593991 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-68gnt\" (UniqueName: \"kubernetes.io/projected/bd25577c-f800-4119-8e6f-20258030dfbc-kube-api-access-68gnt\") pod \"bd25577c-f800-4119-8e6f-20258030dfbc\" (UID: \"bd25577c-f800-4119-8e6f-20258030dfbc\") " Nov 25 10:09:08 crc kubenswrapper[4769]: I1125 10:09:08.594041 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd25577c-f800-4119-8e6f-20258030dfbc-config-data\") pod \"bd25577c-f800-4119-8e6f-20258030dfbc\" (UID: \"bd25577c-f800-4119-8e6f-20258030dfbc\") " Nov 25 10:09:08 crc kubenswrapper[4769]: I1125 10:09:08.594155 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd25577c-f800-4119-8e6f-20258030dfbc-combined-ca-bundle\") pod \"bd25577c-f800-4119-8e6f-20258030dfbc\" (UID: \"bd25577c-f800-4119-8e6f-20258030dfbc\") " Nov 25 10:09:08 crc kubenswrapper[4769]: I1125 10:09:08.608181 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd25577c-f800-4119-8e6f-20258030dfbc-kube-api-access-68gnt" (OuterVolumeSpecName: "kube-api-access-68gnt") pod "bd25577c-f800-4119-8e6f-20258030dfbc" (UID: "bd25577c-f800-4119-8e6f-20258030dfbc"). InnerVolumeSpecName "kube-api-access-68gnt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:09:08 crc kubenswrapper[4769]: I1125 10:09:08.611605 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd25577c-f800-4119-8e6f-20258030dfbc-scripts" (OuterVolumeSpecName: "scripts") pod "bd25577c-f800-4119-8e6f-20258030dfbc" (UID: "bd25577c-f800-4119-8e6f-20258030dfbc"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:09:08 crc kubenswrapper[4769]: I1125 10:09:08.657435 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd25577c-f800-4119-8e6f-20258030dfbc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bd25577c-f800-4119-8e6f-20258030dfbc" (UID: "bd25577c-f800-4119-8e6f-20258030dfbc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:09:08 crc kubenswrapper[4769]: I1125 10:09:08.686817 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd25577c-f800-4119-8e6f-20258030dfbc-config-data" (OuterVolumeSpecName: "config-data") pod "bd25577c-f800-4119-8e6f-20258030dfbc" (UID: "bd25577c-f800-4119-8e6f-20258030dfbc"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:09:08 crc kubenswrapper[4769]: I1125 10:09:08.698614 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-68gnt\" (UniqueName: \"kubernetes.io/projected/bd25577c-f800-4119-8e6f-20258030dfbc-kube-api-access-68gnt\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:08 crc kubenswrapper[4769]: I1125 10:09:08.698650 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd25577c-f800-4119-8e6f-20258030dfbc-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:08 crc kubenswrapper[4769]: I1125 10:09:08.698660 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd25577c-f800-4119-8e6f-20258030dfbc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:08 crc kubenswrapper[4769]: I1125 10:09:08.698668 4769 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd25577c-f800-4119-8e6f-20258030dfbc-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:09 crc kubenswrapper[4769]: I1125 10:09:09.100782 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-w6f7r" Nov 25 10:09:09 crc kubenswrapper[4769]: I1125 10:09:09.101125 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-w6f7r" event={"ID":"bd25577c-f800-4119-8e6f-20258030dfbc","Type":"ContainerDied","Data":"18c971d585408360d81763fb49e03a4a86bdcc9b26cc609eb61719a528949530"} Nov 25 10:09:09 crc kubenswrapper[4769]: I1125 10:09:09.101219 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="18c971d585408360d81763fb49e03a4a86bdcc9b26cc609eb61719a528949530" Nov 25 10:09:09 crc kubenswrapper[4769]: I1125 10:09:09.145557 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 25 10:09:09 crc kubenswrapper[4769]: E1125 10:09:09.146116 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd25577c-f800-4119-8e6f-20258030dfbc" containerName="nova-cell1-conductor-db-sync" Nov 25 10:09:09 crc kubenswrapper[4769]: I1125 10:09:09.146141 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd25577c-f800-4119-8e6f-20258030dfbc" containerName="nova-cell1-conductor-db-sync" Nov 25 10:09:09 crc kubenswrapper[4769]: E1125 10:09:09.146169 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8c0623e-d0b4-4495-a44c-07a1331dcc0f" containerName="init" Nov 25 10:09:09 crc kubenswrapper[4769]: I1125 10:09:09.146178 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8c0623e-d0b4-4495-a44c-07a1331dcc0f" containerName="init" Nov 25 10:09:09 crc kubenswrapper[4769]: E1125 10:09:09.146213 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8c0623e-d0b4-4495-a44c-07a1331dcc0f" containerName="dnsmasq-dns" Nov 25 10:09:09 crc kubenswrapper[4769]: I1125 10:09:09.146222 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8c0623e-d0b4-4495-a44c-07a1331dcc0f" containerName="dnsmasq-dns" Nov 25 10:09:09 crc kubenswrapper[4769]: I1125 10:09:09.146491 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8c0623e-d0b4-4495-a44c-07a1331dcc0f" containerName="dnsmasq-dns" Nov 25 10:09:09 crc kubenswrapper[4769]: I1125 10:09:09.146526 4769 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="bd25577c-f800-4119-8e6f-20258030dfbc" containerName="nova-cell1-conductor-db-sync" Nov 25 10:09:09 crc kubenswrapper[4769]: I1125 10:09:09.148171 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 25 10:09:09 crc kubenswrapper[4769]: I1125 10:09:09.152644 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 25 10:09:09 crc kubenswrapper[4769]: I1125 10:09:09.170534 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 25 10:09:09 crc kubenswrapper[4769]: I1125 10:09:09.211446 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5e88fd3-747e-4e1c-8825-bacafa9d3530-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"f5e88fd3-747e-4e1c-8825-bacafa9d3530\") " pod="openstack/nova-cell1-conductor-0" Nov 25 10:09:09 crc kubenswrapper[4769]: I1125 10:09:09.211539 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4vdjd\" (UniqueName: \"kubernetes.io/projected/f5e88fd3-747e-4e1c-8825-bacafa9d3530-kube-api-access-4vdjd\") pod \"nova-cell1-conductor-0\" (UID: \"f5e88fd3-747e-4e1c-8825-bacafa9d3530\") " pod="openstack/nova-cell1-conductor-0" Nov 25 10:09:09 crc kubenswrapper[4769]: I1125 10:09:09.211564 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5e88fd3-747e-4e1c-8825-bacafa9d3530-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"f5e88fd3-747e-4e1c-8825-bacafa9d3530\") " pod="openstack/nova-cell1-conductor-0" Nov 25 10:09:09 crc kubenswrapper[4769]: I1125 10:09:09.316015 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5e88fd3-747e-4e1c-8825-bacafa9d3530-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"f5e88fd3-747e-4e1c-8825-bacafa9d3530\") " pod="openstack/nova-cell1-conductor-0" Nov 25 10:09:09 crc kubenswrapper[4769]: I1125 10:09:09.316163 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4vdjd\" (UniqueName: \"kubernetes.io/projected/f5e88fd3-747e-4e1c-8825-bacafa9d3530-kube-api-access-4vdjd\") pod \"nova-cell1-conductor-0\" (UID: \"f5e88fd3-747e-4e1c-8825-bacafa9d3530\") " pod="openstack/nova-cell1-conductor-0" Nov 25 10:09:09 crc kubenswrapper[4769]: I1125 10:09:09.316185 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5e88fd3-747e-4e1c-8825-bacafa9d3530-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"f5e88fd3-747e-4e1c-8825-bacafa9d3530\") " pod="openstack/nova-cell1-conductor-0" Nov 25 10:09:09 crc kubenswrapper[4769]: I1125 10:09:09.330143 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5e88fd3-747e-4e1c-8825-bacafa9d3530-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"f5e88fd3-747e-4e1c-8825-bacafa9d3530\") " pod="openstack/nova-cell1-conductor-0" Nov 25 10:09:09 crc kubenswrapper[4769]: I1125 10:09:09.347653 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/f5e88fd3-747e-4e1c-8825-bacafa9d3530-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"f5e88fd3-747e-4e1c-8825-bacafa9d3530\") " pod="openstack/nova-cell1-conductor-0" Nov 25 10:09:09 crc kubenswrapper[4769]: I1125 10:09:09.365359 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4vdjd\" (UniqueName: \"kubernetes.io/projected/f5e88fd3-747e-4e1c-8825-bacafa9d3530-kube-api-access-4vdjd\") pod \"nova-cell1-conductor-0\" (UID: \"f5e88fd3-747e-4e1c-8825-bacafa9d3530\") " pod="openstack/nova-cell1-conductor-0" Nov 25 10:09:09 crc kubenswrapper[4769]: I1125 10:09:09.420134 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 25 10:09:09 crc kubenswrapper[4769]: I1125 10:09:09.421650 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 25 10:09:09 crc kubenswrapper[4769]: I1125 10:09:09.480202 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 25 10:09:09 crc kubenswrapper[4769]: I1125 10:09:09.731906 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Nov 25 10:09:10 crc kubenswrapper[4769]: I1125 10:09:10.191997 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:09:10 crc kubenswrapper[4769]: I1125 10:09:10.225628 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7" containerName="ceilometer-central-agent" containerID="cri-o://3aeae9b71b5932ba56ffa4c52bd7b7401c0770ec89df7fe9ba4e13a70549dc01" gracePeriod=30 Nov 25 10:09:10 crc kubenswrapper[4769]: I1125 10:09:10.231119 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7" containerName="proxy-httpd" containerID="cri-o://c293999bf19c2c03f3b1ede2528f334bcf427233bfced41a317c3b4f1fff5647" gracePeriod=30 Nov 25 10:09:10 crc kubenswrapper[4769]: I1125 10:09:10.231206 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7" containerName="sg-core" containerID="cri-o://33c538311f856ad3fdb6b4fcb1f7ddae812c9c0f6a75d5b569f0f9aec3f9be23" gracePeriod=30 Nov 25 10:09:10 crc kubenswrapper[4769]: I1125 10:09:10.231261 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7" containerName="ceilometer-notification-agent" containerID="cri-o://2568c2552eb4413283b6fc1e4392130452b5df9d981d7365fc46b56b8a8ff06e" gracePeriod=30 Nov 25 10:09:10 crc kubenswrapper[4769]: I1125 10:09:10.303071 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a8c0623e-d0b4-4495-a44c-07a1331dcc0f" path="/var/lib/kubelet/pods/a8c0623e-d0b4-4495-a44c-07a1331dcc0f/volumes" Nov 25 10:09:10 crc kubenswrapper[4769]: I1125 10:09:10.401297 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-xsqz9" Nov 25 10:09:10 crc kubenswrapper[4769]: I1125 10:09:10.526732 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-77n2s\" (UniqueName: \"kubernetes.io/projected/8a7752ba-7e13-4f6c-af46-5df66ad668e3-kube-api-access-77n2s\") pod \"8a7752ba-7e13-4f6c-af46-5df66ad668e3\" (UID: \"8a7752ba-7e13-4f6c-af46-5df66ad668e3\") " Nov 25 10:09:10 crc kubenswrapper[4769]: I1125 10:09:10.526781 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8a7752ba-7e13-4f6c-af46-5df66ad668e3-scripts\") pod \"8a7752ba-7e13-4f6c-af46-5df66ad668e3\" (UID: \"8a7752ba-7e13-4f6c-af46-5df66ad668e3\") " Nov 25 10:09:10 crc kubenswrapper[4769]: I1125 10:09:10.526830 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a7752ba-7e13-4f6c-af46-5df66ad668e3-combined-ca-bundle\") pod \"8a7752ba-7e13-4f6c-af46-5df66ad668e3\" (UID: \"8a7752ba-7e13-4f6c-af46-5df66ad668e3\") " Nov 25 10:09:10 crc kubenswrapper[4769]: I1125 10:09:10.527344 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a7752ba-7e13-4f6c-af46-5df66ad668e3-config-data\") pod \"8a7752ba-7e13-4f6c-af46-5df66ad668e3\" (UID: \"8a7752ba-7e13-4f6c-af46-5df66ad668e3\") " Nov 25 10:09:10 crc kubenswrapper[4769]: I1125 10:09:10.537108 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a7752ba-7e13-4f6c-af46-5df66ad668e3-kube-api-access-77n2s" (OuterVolumeSpecName: "kube-api-access-77n2s") pod "8a7752ba-7e13-4f6c-af46-5df66ad668e3" (UID: "8a7752ba-7e13-4f6c-af46-5df66ad668e3"). InnerVolumeSpecName "kube-api-access-77n2s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:09:10 crc kubenswrapper[4769]: I1125 10:09:10.539272 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a7752ba-7e13-4f6c-af46-5df66ad668e3-scripts" (OuterVolumeSpecName: "scripts") pod "8a7752ba-7e13-4f6c-af46-5df66ad668e3" (UID: "8a7752ba-7e13-4f6c-af46-5df66ad668e3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:09:10 crc kubenswrapper[4769]: I1125 10:09:10.608902 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a7752ba-7e13-4f6c-af46-5df66ad668e3-config-data" (OuterVolumeSpecName: "config-data") pod "8a7752ba-7e13-4f6c-af46-5df66ad668e3" (UID: "8a7752ba-7e13-4f6c-af46-5df66ad668e3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:09:10 crc kubenswrapper[4769]: I1125 10:09:10.628413 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a7752ba-7e13-4f6c-af46-5df66ad668e3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8a7752ba-7e13-4f6c-af46-5df66ad668e3" (UID: "8a7752ba-7e13-4f6c-af46-5df66ad668e3"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:09:10 crc kubenswrapper[4769]: I1125 10:09:10.630047 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a7752ba-7e13-4f6c-af46-5df66ad668e3-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:10 crc kubenswrapper[4769]: I1125 10:09:10.630077 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-77n2s\" (UniqueName: \"kubernetes.io/projected/8a7752ba-7e13-4f6c-af46-5df66ad668e3-kube-api-access-77n2s\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:10 crc kubenswrapper[4769]: I1125 10:09:10.630089 4769 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8a7752ba-7e13-4f6c-af46-5df66ad668e3-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:10 crc kubenswrapper[4769]: I1125 10:09:10.630102 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a7752ba-7e13-4f6c-af46-5df66ad668e3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:10 crc kubenswrapper[4769]: I1125 10:09:10.892900 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 25 10:09:10 crc kubenswrapper[4769]: I1125 10:09:10.929653 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 10:09:10 crc kubenswrapper[4769]: I1125 10:09:10.929726 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 10:09:11 crc kubenswrapper[4769]: I1125 10:09:11.125017 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"f5e88fd3-747e-4e1c-8825-bacafa9d3530","Type":"ContainerStarted","Data":"006007327a8b9e5b4cc614c8bc28f6e3dde7688554ecf57d65a8009ebf6c5b8f"} Nov 25 10:09:11 crc kubenswrapper[4769]: I1125 10:09:11.125061 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"f5e88fd3-747e-4e1c-8825-bacafa9d3530","Type":"ContainerStarted","Data":"d3fd3cc03e45ebf24379ab69e3bdeba09917eb0162a122e6ea3239f8ca74733f"} Nov 25 10:09:11 crc kubenswrapper[4769]: I1125 10:09:11.126202 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Nov 25 10:09:11 crc kubenswrapper[4769]: I1125 10:09:11.127735 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-xsqz9" event={"ID":"8a7752ba-7e13-4f6c-af46-5df66ad668e3","Type":"ContainerDied","Data":"68ab05a04255c77339f7d230b7db0c94a464240cb55c58632ac0b4bf27bfc49a"} Nov 25 10:09:11 crc kubenswrapper[4769]: I1125 10:09:11.127765 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="68ab05a04255c77339f7d230b7db0c94a464240cb55c58632ac0b4bf27bfc49a" Nov 25 10:09:11 crc kubenswrapper[4769]: I1125 10:09:11.127804 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-xsqz9" Nov 25 10:09:11 crc kubenswrapper[4769]: I1125 10:09:11.132132 4769 generic.go:334] "Generic (PLEG): container finished" podID="49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7" containerID="c293999bf19c2c03f3b1ede2528f334bcf427233bfced41a317c3b4f1fff5647" exitCode=0 Nov 25 10:09:11 crc kubenswrapper[4769]: I1125 10:09:11.132171 4769 generic.go:334] "Generic (PLEG): container finished" podID="49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7" containerID="33c538311f856ad3fdb6b4fcb1f7ddae812c9c0f6a75d5b569f0f9aec3f9be23" exitCode=2 Nov 25 10:09:11 crc kubenswrapper[4769]: I1125 10:09:11.132179 4769 generic.go:334] "Generic (PLEG): container finished" podID="49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7" containerID="2568c2552eb4413283b6fc1e4392130452b5df9d981d7365fc46b56b8a8ff06e" exitCode=0 Nov 25 10:09:11 crc kubenswrapper[4769]: I1125 10:09:11.132227 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7","Type":"ContainerDied","Data":"c293999bf19c2c03f3b1ede2528f334bcf427233bfced41a317c3b4f1fff5647"} Nov 25 10:09:11 crc kubenswrapper[4769]: I1125 10:09:11.132320 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7","Type":"ContainerDied","Data":"33c538311f856ad3fdb6b4fcb1f7ddae812c9c0f6a75d5b569f0f9aec3f9be23"} Nov 25 10:09:11 crc kubenswrapper[4769]: I1125 10:09:11.132342 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7","Type":"ContainerDied","Data":"2568c2552eb4413283b6fc1e4392130452b5df9d981d7365fc46b56b8a8ff06e"} Nov 25 10:09:11 crc kubenswrapper[4769]: I1125 10:09:11.136410 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"33335ac3-fa4e-4a44-89b7-71083bcf7930","Type":"ContainerStarted","Data":"09711f25fdaf8a7b325b3961e035985bbef939a9315d4c48e41b712279c2e69a"} Nov 25 10:09:11 crc kubenswrapper[4769]: I1125 10:09:11.153276 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.153247603 podStartE2EDuration="2.153247603s" podCreationTimestamp="2025-11-25 10:09:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:09:11.144237662 +0000 UTC m=+1499.729209975" watchObservedRunningTime="2025-11-25 10:09:11.153247603 +0000 UTC m=+1499.738219916" Nov 25 10:09:11 crc kubenswrapper[4769]: I1125 10:09:11.638734 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 10:09:11 crc kubenswrapper[4769]: I1125 10:09:11.639242 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="0f41ea94-9525-4a09-96e8-cd6cbc81278a" containerName="nova-scheduler-scheduler" containerID="cri-o://c236b72e809a4e5e305f9a7b1ec56438a38e8b53274cf98e0b9321ef3bf1b890" gracePeriod=30 Nov 25 10:09:11 crc kubenswrapper[4769]: I1125 10:09:11.656274 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 10:09:11 crc kubenswrapper[4769]: I1125 10:09:11.656510 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="5f8bf574-f4cc-49f2-97e4-dc87f8435ebb" containerName="nova-api-log" 
containerID="cri-o://0618e911ba8be8f596aaa22da2ef18ce65ec658e44e1993a2a089680884d3d07" gracePeriod=30 Nov 25 10:09:11 crc kubenswrapper[4769]: I1125 10:09:11.656905 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="5f8bf574-f4cc-49f2-97e4-dc87f8435ebb" containerName="nova-api-api" containerID="cri-o://45017a98e570a049b2577793492b1ad8408d4c66ce111ec5bb5f67ad533d31cf" gracePeriod=30 Nov 25 10:09:11 crc kubenswrapper[4769]: I1125 10:09:11.664508 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="5f8bf574-f4cc-49f2-97e4-dc87f8435ebb" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.238:8774/\": EOF" Nov 25 10:09:11 crc kubenswrapper[4769]: I1125 10:09:11.664517 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="5f8bf574-f4cc-49f2-97e4-dc87f8435ebb" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.238:8774/\": EOF" Nov 25 10:09:11 crc kubenswrapper[4769]: I1125 10:09:11.679721 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 10:09:11 crc kubenswrapper[4769]: I1125 10:09:11.679987 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4" containerName="nova-metadata-log" containerID="cri-o://3cd8e97579b9f23560f2f00166785db754d9cf1ffbd4d80c9acc0958c294713d" gracePeriod=30 Nov 25 10:09:11 crc kubenswrapper[4769]: I1125 10:09:11.680149 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4" containerName="nova-metadata-metadata" containerID="cri-o://a1ad1b43aee177a2d262fa0522d2d1c326f0bd1a5d0dbca1571445337572cd1e" gracePeriod=30 Nov 25 10:09:12 crc kubenswrapper[4769]: I1125 10:09:12.162109 4769 generic.go:334] "Generic (PLEG): container finished" podID="5f8bf574-f4cc-49f2-97e4-dc87f8435ebb" containerID="0618e911ba8be8f596aaa22da2ef18ce65ec658e44e1993a2a089680884d3d07" exitCode=143 Nov 25 10:09:12 crc kubenswrapper[4769]: I1125 10:09:12.162772 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5f8bf574-f4cc-49f2-97e4-dc87f8435ebb","Type":"ContainerDied","Data":"0618e911ba8be8f596aaa22da2ef18ce65ec658e44e1993a2a089680884d3d07"} Nov 25 10:09:12 crc kubenswrapper[4769]: I1125 10:09:12.167589 4769 generic.go:334] "Generic (PLEG): container finished" podID="c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4" containerID="a1ad1b43aee177a2d262fa0522d2d1c326f0bd1a5d0dbca1571445337572cd1e" exitCode=0 Nov 25 10:09:12 crc kubenswrapper[4769]: I1125 10:09:12.167624 4769 generic.go:334] "Generic (PLEG): container finished" podID="c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4" containerID="3cd8e97579b9f23560f2f00166785db754d9cf1ffbd4d80c9acc0958c294713d" exitCode=143 Nov 25 10:09:12 crc kubenswrapper[4769]: I1125 10:09:12.167951 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4","Type":"ContainerDied","Data":"a1ad1b43aee177a2d262fa0522d2d1c326f0bd1a5d0dbca1571445337572cd1e"} Nov 25 10:09:12 crc kubenswrapper[4769]: I1125 10:09:12.168037 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4","Type":"ContainerDied","Data":"3cd8e97579b9f23560f2f00166785db754d9cf1ffbd4d80c9acc0958c294713d"} Nov 25 
10:09:13 crc kubenswrapper[4769]: I1125 10:09:13.423105 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 10:09:13 crc kubenswrapper[4769]: I1125 10:09:13.556129 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4-combined-ca-bundle\") pod \"c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4\" (UID: \"c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4\") " Nov 25 10:09:13 crc kubenswrapper[4769]: I1125 10:09:13.556468 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nlw4m\" (UniqueName: \"kubernetes.io/projected/c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4-kube-api-access-nlw4m\") pod \"c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4\" (UID: \"c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4\") " Nov 25 10:09:13 crc kubenswrapper[4769]: I1125 10:09:13.556491 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4-config-data\") pod \"c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4\" (UID: \"c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4\") " Nov 25 10:09:13 crc kubenswrapper[4769]: I1125 10:09:13.556547 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4-nova-metadata-tls-certs\") pod \"c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4\" (UID: \"c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4\") " Nov 25 10:09:13 crc kubenswrapper[4769]: I1125 10:09:13.556581 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4-logs\") pod \"c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4\" (UID: \"c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4\") " Nov 25 10:09:13 crc kubenswrapper[4769]: I1125 10:09:13.557642 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4-logs" (OuterVolumeSpecName: "logs") pod "c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4" (UID: "c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:09:13 crc kubenswrapper[4769]: I1125 10:09:13.566128 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4-kube-api-access-nlw4m" (OuterVolumeSpecName: "kube-api-access-nlw4m") pod "c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4" (UID: "c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4"). InnerVolumeSpecName "kube-api-access-nlw4m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:09:13 crc kubenswrapper[4769]: I1125 10:09:13.604232 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4" (UID: "c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:09:13 crc kubenswrapper[4769]: I1125 10:09:13.616707 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4-config-data" (OuterVolumeSpecName: "config-data") pod "c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4" (UID: "c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:09:13 crc kubenswrapper[4769]: I1125 10:09:13.643214 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4" (UID: "c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:09:13 crc kubenswrapper[4769]: I1125 10:09:13.659901 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nlw4m\" (UniqueName: \"kubernetes.io/projected/c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4-kube-api-access-nlw4m\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:13 crc kubenswrapper[4769]: I1125 10:09:13.659943 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:13 crc kubenswrapper[4769]: I1125 10:09:13.659955 4769 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:13 crc kubenswrapper[4769]: I1125 10:09:13.659982 4769 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4-logs\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:13 crc kubenswrapper[4769]: I1125 10:09:13.659997 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.288809 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4","Type":"ContainerDied","Data":"fbcc6dc78e5f29254c662250da77b60dc0532505ae6011f2b364f95bf54ce583"} Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.289529 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.289566 4769 scope.go:117] "RemoveContainer" containerID="a1ad1b43aee177a2d262fa0522d2d1c326f0bd1a5d0dbca1571445337572cd1e" Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.299447 4769 generic.go:334] "Generic (PLEG): container finished" podID="0f41ea94-9525-4a09-96e8-cd6cbc81278a" containerID="c236b72e809a4e5e305f9a7b1ec56438a38e8b53274cf98e0b9321ef3bf1b890" exitCode=0 Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.299520 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"0f41ea94-9525-4a09-96e8-cd6cbc81278a","Type":"ContainerDied","Data":"c236b72e809a4e5e305f9a7b1ec56438a38e8b53274cf98e0b9321ef3bf1b890"} Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.299566 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"0f41ea94-9525-4a09-96e8-cd6cbc81278a","Type":"ContainerDied","Data":"12579da008ee1f345f74333625f575786aa3c5bcdfe1eeecca4bfede0c239c43"} Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.299580 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="12579da008ee1f345f74333625f575786aa3c5bcdfe1eeecca4bfede0c239c43" Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.319542 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"33335ac3-fa4e-4a44-89b7-71083bcf7930","Type":"ContainerStarted","Data":"ef2b07e27d68e43f372b0de88833c2acaaeee0ff597b28bd172ec305aafc588e"} Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.328331 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.381708 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.383923 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f41ea94-9525-4a09-96e8-cd6cbc81278a-combined-ca-bundle\") pod \"0f41ea94-9525-4a09-96e8-cd6cbc81278a\" (UID: \"0f41ea94-9525-4a09-96e8-cd6cbc81278a\") " Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.384136 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f41ea94-9525-4a09-96e8-cd6cbc81278a-config-data\") pod \"0f41ea94-9525-4a09-96e8-cd6cbc81278a\" (UID: \"0f41ea94-9525-4a09-96e8-cd6cbc81278a\") " Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.384183 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jtvhh\" (UniqueName: \"kubernetes.io/projected/0f41ea94-9525-4a09-96e8-cd6cbc81278a-kube-api-access-jtvhh\") pod \"0f41ea94-9525-4a09-96e8-cd6cbc81278a\" (UID: \"0f41ea94-9525-4a09-96e8-cd6cbc81278a\") " Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.392023 4769 scope.go:117] "RemoveContainer" containerID="3cd8e97579b9f23560f2f00166785db754d9cf1ffbd4d80c9acc0958c294713d" Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.407653 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f41ea94-9525-4a09-96e8-cd6cbc81278a-kube-api-access-jtvhh" (OuterVolumeSpecName: "kube-api-access-jtvhh") pod "0f41ea94-9525-4a09-96e8-cd6cbc81278a" (UID: "0f41ea94-9525-4a09-96e8-cd6cbc81278a"). 
InnerVolumeSpecName "kube-api-access-jtvhh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.427718 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.443726 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 25 10:09:14 crc kubenswrapper[4769]: E1125 10:09:14.444471 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4" containerName="nova-metadata-metadata" Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.444496 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4" containerName="nova-metadata-metadata" Nov 25 10:09:14 crc kubenswrapper[4769]: E1125 10:09:14.444529 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a7752ba-7e13-4f6c-af46-5df66ad668e3" containerName="nova-manage" Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.444537 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a7752ba-7e13-4f6c-af46-5df66ad668e3" containerName="nova-manage" Nov 25 10:09:14 crc kubenswrapper[4769]: E1125 10:09:14.444570 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f41ea94-9525-4a09-96e8-cd6cbc81278a" containerName="nova-scheduler-scheduler" Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.444577 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f41ea94-9525-4a09-96e8-cd6cbc81278a" containerName="nova-scheduler-scheduler" Nov 25 10:09:14 crc kubenswrapper[4769]: E1125 10:09:14.444608 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4" containerName="nova-metadata-log" Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.444615 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4" containerName="nova-metadata-log" Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.444906 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4" containerName="nova-metadata-metadata" Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.444931 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a7752ba-7e13-4f6c-af46-5df66ad668e3" containerName="nova-manage" Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.444943 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f41ea94-9525-4a09-96e8-cd6cbc81278a" containerName="nova-scheduler-scheduler" Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.444981 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4" containerName="nova-metadata-log" Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.446533 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.453670 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.453993 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.466924 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.495128 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/44a42f95-baf9-4949-a54e-bc3eadadc774-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"44a42f95-baf9-4949-a54e-bc3eadadc774\") " pod="openstack/nova-metadata-0" Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.495236 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44a42f95-baf9-4949-a54e-bc3eadadc774-config-data\") pod \"nova-metadata-0\" (UID: \"44a42f95-baf9-4949-a54e-bc3eadadc774\") " pod="openstack/nova-metadata-0" Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.495322 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44a42f95-baf9-4949-a54e-bc3eadadc774-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"44a42f95-baf9-4949-a54e-bc3eadadc774\") " pod="openstack/nova-metadata-0" Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.495527 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/44a42f95-baf9-4949-a54e-bc3eadadc774-logs\") pod \"nova-metadata-0\" (UID: \"44a42f95-baf9-4949-a54e-bc3eadadc774\") " pod="openstack/nova-metadata-0" Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.495581 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2v68z\" (UniqueName: \"kubernetes.io/projected/44a42f95-baf9-4949-a54e-bc3eadadc774-kube-api-access-2v68z\") pod \"nova-metadata-0\" (UID: \"44a42f95-baf9-4949-a54e-bc3eadadc774\") " pod="openstack/nova-metadata-0" Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.495744 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jtvhh\" (UniqueName: \"kubernetes.io/projected/0f41ea94-9525-4a09-96e8-cd6cbc81278a-kube-api-access-jtvhh\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.506348 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0f41ea94-9525-4a09-96e8-cd6cbc81278a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0f41ea94-9525-4a09-96e8-cd6cbc81278a" (UID: "0f41ea94-9525-4a09-96e8-cd6cbc81278a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.546424 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0f41ea94-9525-4a09-96e8-cd6cbc81278a-config-data" (OuterVolumeSpecName: "config-data") pod "0f41ea94-9525-4a09-96e8-cd6cbc81278a" (UID: "0f41ea94-9525-4a09-96e8-cd6cbc81278a"). 
InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.601513 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/44a42f95-baf9-4949-a54e-bc3eadadc774-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"44a42f95-baf9-4949-a54e-bc3eadadc774\") " pod="openstack/nova-metadata-0" Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.601594 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44a42f95-baf9-4949-a54e-bc3eadadc774-config-data\") pod \"nova-metadata-0\" (UID: \"44a42f95-baf9-4949-a54e-bc3eadadc774\") " pod="openstack/nova-metadata-0" Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.601638 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44a42f95-baf9-4949-a54e-bc3eadadc774-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"44a42f95-baf9-4949-a54e-bc3eadadc774\") " pod="openstack/nova-metadata-0" Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.602516 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/44a42f95-baf9-4949-a54e-bc3eadadc774-logs\") pod \"nova-metadata-0\" (UID: \"44a42f95-baf9-4949-a54e-bc3eadadc774\") " pod="openstack/nova-metadata-0" Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.602549 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2v68z\" (UniqueName: \"kubernetes.io/projected/44a42f95-baf9-4949-a54e-bc3eadadc774-kube-api-access-2v68z\") pod \"nova-metadata-0\" (UID: \"44a42f95-baf9-4949-a54e-bc3eadadc774\") " pod="openstack/nova-metadata-0" Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.602661 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f41ea94-9525-4a09-96e8-cd6cbc81278a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.602681 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f41ea94-9525-4a09-96e8-cd6cbc81278a-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.604412 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/44a42f95-baf9-4949-a54e-bc3eadadc774-logs\") pod \"nova-metadata-0\" (UID: \"44a42f95-baf9-4949-a54e-bc3eadadc774\") " pod="openstack/nova-metadata-0" Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.608722 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44a42f95-baf9-4949-a54e-bc3eadadc774-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"44a42f95-baf9-4949-a54e-bc3eadadc774\") " pod="openstack/nova-metadata-0" Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.612331 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/44a42f95-baf9-4949-a54e-bc3eadadc774-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"44a42f95-baf9-4949-a54e-bc3eadadc774\") " pod="openstack/nova-metadata-0" Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 
10:09:14.615160 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44a42f95-baf9-4949-a54e-bc3eadadc774-config-data\") pod \"nova-metadata-0\" (UID: \"44a42f95-baf9-4949-a54e-bc3eadadc774\") " pod="openstack/nova-metadata-0" Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.620544 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2v68z\" (UniqueName: \"kubernetes.io/projected/44a42f95-baf9-4949-a54e-bc3eadadc774-kube-api-access-2v68z\") pod \"nova-metadata-0\" (UID: \"44a42f95-baf9-4949-a54e-bc3eadadc774\") " pod="openstack/nova-metadata-0" Nov 25 10:09:14 crc kubenswrapper[4769]: I1125 10:09:14.784584 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 10:09:15 crc kubenswrapper[4769]: I1125 10:09:15.339501 4769 generic.go:334] "Generic (PLEG): container finished" podID="49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7" containerID="3aeae9b71b5932ba56ffa4c52bd7b7401c0770ec89df7fe9ba4e13a70549dc01" exitCode=0 Nov 25 10:09:15 crc kubenswrapper[4769]: I1125 10:09:15.339671 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 10:09:15 crc kubenswrapper[4769]: I1125 10:09:15.344266 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7","Type":"ContainerDied","Data":"3aeae9b71b5932ba56ffa4c52bd7b7401c0770ec89df7fe9ba4e13a70549dc01"} Nov 25 10:09:15 crc kubenswrapper[4769]: I1125 10:09:15.395521 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 10:09:15 crc kubenswrapper[4769]: I1125 10:09:15.418041 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 10:09:15 crc kubenswrapper[4769]: I1125 10:09:15.440657 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 10:09:15 crc kubenswrapper[4769]: I1125 10:09:15.444386 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 10:09:15 crc kubenswrapper[4769]: I1125 10:09:15.451268 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 25 10:09:15 crc kubenswrapper[4769]: I1125 10:09:15.477501 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 10:09:15 crc kubenswrapper[4769]: I1125 10:09:15.575585 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-95t42\" (UniqueName: \"kubernetes.io/projected/74f2d1fa-6292-4290-9ad8-798f3c41bf97-kube-api-access-95t42\") pod \"nova-scheduler-0\" (UID: \"74f2d1fa-6292-4290-9ad8-798f3c41bf97\") " pod="openstack/nova-scheduler-0" Nov 25 10:09:15 crc kubenswrapper[4769]: I1125 10:09:15.576221 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74f2d1fa-6292-4290-9ad8-798f3c41bf97-config-data\") pod \"nova-scheduler-0\" (UID: \"74f2d1fa-6292-4290-9ad8-798f3c41bf97\") " pod="openstack/nova-scheduler-0" Nov 25 10:09:15 crc kubenswrapper[4769]: I1125 10:09:15.576356 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74f2d1fa-6292-4290-9ad8-798f3c41bf97-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"74f2d1fa-6292-4290-9ad8-798f3c41bf97\") " pod="openstack/nova-scheduler-0" Nov 25 10:09:15 crc kubenswrapper[4769]: I1125 10:09:15.679665 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74f2d1fa-6292-4290-9ad8-798f3c41bf97-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"74f2d1fa-6292-4290-9ad8-798f3c41bf97\") " pod="openstack/nova-scheduler-0" Nov 25 10:09:15 crc kubenswrapper[4769]: I1125 10:09:15.679773 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-95t42\" (UniqueName: \"kubernetes.io/projected/74f2d1fa-6292-4290-9ad8-798f3c41bf97-kube-api-access-95t42\") pod \"nova-scheduler-0\" (UID: \"74f2d1fa-6292-4290-9ad8-798f3c41bf97\") " pod="openstack/nova-scheduler-0" Nov 25 10:09:15 crc kubenswrapper[4769]: I1125 10:09:15.679884 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74f2d1fa-6292-4290-9ad8-798f3c41bf97-config-data\") pod \"nova-scheduler-0\" (UID: \"74f2d1fa-6292-4290-9ad8-798f3c41bf97\") " pod="openstack/nova-scheduler-0" Nov 25 10:09:15 crc kubenswrapper[4769]: I1125 10:09:15.695829 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74f2d1fa-6292-4290-9ad8-798f3c41bf97-config-data\") pod \"nova-scheduler-0\" (UID: \"74f2d1fa-6292-4290-9ad8-798f3c41bf97\") " pod="openstack/nova-scheduler-0" Nov 25 10:09:15 crc kubenswrapper[4769]: I1125 10:09:15.699545 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74f2d1fa-6292-4290-9ad8-798f3c41bf97-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"74f2d1fa-6292-4290-9ad8-798f3c41bf97\") " pod="openstack/nova-scheduler-0" Nov 25 10:09:15 crc kubenswrapper[4769]: I1125 10:09:15.700476 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-95t42\" (UniqueName: 
\"kubernetes.io/projected/74f2d1fa-6292-4290-9ad8-798f3c41bf97-kube-api-access-95t42\") pod \"nova-scheduler-0\" (UID: \"74f2d1fa-6292-4290-9ad8-798f3c41bf97\") " pod="openstack/nova-scheduler-0" Nov 25 10:09:15 crc kubenswrapper[4769]: I1125 10:09:15.774070 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 10:09:16 crc kubenswrapper[4769]: I1125 10:09:16.305493 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0f41ea94-9525-4a09-96e8-cd6cbc81278a" path="/var/lib/kubelet/pods/0f41ea94-9525-4a09-96e8-cd6cbc81278a/volumes" Nov 25 10:09:16 crc kubenswrapper[4769]: I1125 10:09:16.312507 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4" path="/var/lib/kubelet/pods/c5e4001a-e8bc-4aa9-97a7-1e4f6a2d2aa4/volumes" Nov 25 10:09:16 crc kubenswrapper[4769]: I1125 10:09:16.364400 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:09:16 crc kubenswrapper[4769]: I1125 10:09:16.379598 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7","Type":"ContainerDied","Data":"fc6f153fc79138569c535886823b873a27921f272edc9d587d3dbd04d34eb963"} Nov 25 10:09:16 crc kubenswrapper[4769]: I1125 10:09:16.379670 4769 scope.go:117] "RemoveContainer" containerID="c293999bf19c2c03f3b1ede2528f334bcf427233bfced41a317c3b4f1fff5647" Nov 25 10:09:16 crc kubenswrapper[4769]: I1125 10:09:16.379697 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:09:16 crc kubenswrapper[4769]: I1125 10:09:16.403043 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"33335ac3-fa4e-4a44-89b7-71083bcf7930","Type":"ContainerStarted","Data":"75b5d69aa3c48413997bb55343b3fbd3f1aa389aed79c724735e83ddce28fcb3"} Nov 25 10:09:16 crc kubenswrapper[4769]: I1125 10:09:16.403290 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="33335ac3-fa4e-4a44-89b7-71083bcf7930" containerName="aodh-api" containerID="cri-o://338a8423adc3403c6e5ee2124377a3e2a9e45462d9c3952d90d353ce929ed541" gracePeriod=30 Nov 25 10:09:16 crc kubenswrapper[4769]: I1125 10:09:16.403606 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="33335ac3-fa4e-4a44-89b7-71083bcf7930" containerName="aodh-listener" containerID="cri-o://75b5d69aa3c48413997bb55343b3fbd3f1aa389aed79c724735e83ddce28fcb3" gracePeriod=30 Nov 25 10:09:16 crc kubenswrapper[4769]: I1125 10:09:16.403661 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="33335ac3-fa4e-4a44-89b7-71083bcf7930" containerName="aodh-notifier" containerID="cri-o://ef2b07e27d68e43f372b0de88833c2acaaeee0ff597b28bd172ec305aafc588e" gracePeriod=30 Nov 25 10:09:16 crc kubenswrapper[4769]: I1125 10:09:16.403697 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="33335ac3-fa4e-4a44-89b7-71083bcf7930" containerName="aodh-evaluator" containerID="cri-o://09711f25fdaf8a7b325b3961e035985bbef939a9315d4c48e41b712279c2e69a" gracePeriod=30 Nov 25 10:09:16 crc kubenswrapper[4769]: I1125 10:09:16.420827 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 10:09:16 crc kubenswrapper[4769]: I1125 10:09:16.469019 4769 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=2.530558948 podStartE2EDuration="11.468983431s" podCreationTimestamp="2025-11-25 10:09:05 +0000 UTC" firstStartedPulling="2025-11-25 10:09:06.711505585 +0000 UTC m=+1495.296477898" lastFinishedPulling="2025-11-25 10:09:15.649930068 +0000 UTC m=+1504.234902381" observedRunningTime="2025-11-25 10:09:16.455198527 +0000 UTC m=+1505.040170840" watchObservedRunningTime="2025-11-25 10:09:16.468983431 +0000 UTC m=+1505.053955744" Nov 25 10:09:16 crc kubenswrapper[4769]: I1125 10:09:16.479297 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7-scripts\") pod \"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7\" (UID: \"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7\") " Nov 25 10:09:16 crc kubenswrapper[4769]: I1125 10:09:16.479394 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7-log-httpd\") pod \"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7\" (UID: \"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7\") " Nov 25 10:09:16 crc kubenswrapper[4769]: I1125 10:09:16.479484 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7-combined-ca-bundle\") pod \"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7\" (UID: \"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7\") " Nov 25 10:09:16 crc kubenswrapper[4769]: I1125 10:09:16.479811 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l4kgf\" (UniqueName: \"kubernetes.io/projected/49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7-kube-api-access-l4kgf\") pod \"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7\" (UID: \"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7\") " Nov 25 10:09:16 crc kubenswrapper[4769]: I1125 10:09:16.479837 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7-config-data\") pod \"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7\" (UID: \"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7\") " Nov 25 10:09:16 crc kubenswrapper[4769]: I1125 10:09:16.479869 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7-run-httpd\") pod \"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7\" (UID: \"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7\") " Nov 25 10:09:16 crc kubenswrapper[4769]: I1125 10:09:16.479979 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7-sg-core-conf-yaml\") pod \"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7\" (UID: \"49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7\") " Nov 25 10:09:16 crc kubenswrapper[4769]: I1125 10:09:16.487059 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7" (UID: "49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:09:16 crc kubenswrapper[4769]: I1125 10:09:16.487408 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7" (UID: "49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:09:16 crc kubenswrapper[4769]: I1125 10:09:16.490643 4769 scope.go:117] "RemoveContainer" containerID="33c538311f856ad3fdb6b4fcb1f7ddae812c9c0f6a75d5b569f0f9aec3f9be23" Nov 25 10:09:16 crc kubenswrapper[4769]: I1125 10:09:16.519838 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7-kube-api-access-l4kgf" (OuterVolumeSpecName: "kube-api-access-l4kgf") pod "49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7" (UID: "49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7"). InnerVolumeSpecName "kube-api-access-l4kgf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:09:16 crc kubenswrapper[4769]: I1125 10:09:16.519979 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7-scripts" (OuterVolumeSpecName: "scripts") pod "49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7" (UID: "49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:09:16 crc kubenswrapper[4769]: I1125 10:09:16.561919 4769 scope.go:117] "RemoveContainer" containerID="2568c2552eb4413283b6fc1e4392130452b5df9d981d7365fc46b56b8a8ff06e" Nov 25 10:09:16 crc kubenswrapper[4769]: I1125 10:09:16.596312 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7" (UID: "49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:09:16 crc kubenswrapper[4769]: I1125 10:09:16.617943 4769 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:16 crc kubenswrapper[4769]: I1125 10:09:16.618006 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l4kgf\" (UniqueName: \"kubernetes.io/projected/49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7-kube-api-access-l4kgf\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:16 crc kubenswrapper[4769]: I1125 10:09:16.618038 4769 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:16 crc kubenswrapper[4769]: I1125 10:09:16.618057 4769 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:16 crc kubenswrapper[4769]: I1125 10:09:16.618069 4769 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:16 crc kubenswrapper[4769]: I1125 10:09:16.644426 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7" (UID: "49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:09:16 crc kubenswrapper[4769]: I1125 10:09:16.721741 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:16 crc kubenswrapper[4769]: I1125 10:09:16.722841 4769 scope.go:117] "RemoveContainer" containerID="3aeae9b71b5932ba56ffa4c52bd7b7401c0770ec89df7fe9ba4e13a70549dc01" Nov 25 10:09:16 crc kubenswrapper[4769]: I1125 10:09:16.752244 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7-config-data" (OuterVolumeSpecName: "config-data") pod "49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7" (UID: "49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:09:16 crc kubenswrapper[4769]: I1125 10:09:16.755252 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 10:09:16 crc kubenswrapper[4769]: W1125 10:09:16.795685 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod74f2d1fa_6292_4290_9ad8_798f3c41bf97.slice/crio-fd16c50f0cfb156afabfa54fbae19ce0d9e037e0c211f5fc70d6f7e7fa051a11 WatchSource:0}: Error finding container fd16c50f0cfb156afabfa54fbae19ce0d9e037e0c211f5fc70d6f7e7fa051a11: Status 404 returned error can't find the container with id fd16c50f0cfb156afabfa54fbae19ce0d9e037e0c211f5fc70d6f7e7fa051a11 Nov 25 10:09:16 crc kubenswrapper[4769]: I1125 10:09:16.826225 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.085870 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.097944 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.129399 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:09:17 crc kubenswrapper[4769]: E1125 10:09:17.130028 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7" containerName="sg-core" Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.130056 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7" containerName="sg-core" Nov 25 10:09:17 crc kubenswrapper[4769]: E1125 10:09:17.130080 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7" containerName="proxy-httpd" Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.130087 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7" containerName="proxy-httpd" Nov 25 10:09:17 crc kubenswrapper[4769]: E1125 10:09:17.130110 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7" containerName="ceilometer-central-agent" Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.130117 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7" containerName="ceilometer-central-agent" Nov 25 10:09:17 crc kubenswrapper[4769]: E1125 10:09:17.130129 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7" containerName="ceilometer-notification-agent" Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.130260 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7" containerName="ceilometer-notification-agent" Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.130514 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7" containerName="ceilometer-central-agent" Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.130544 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7" containerName="sg-core" Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.130558 4769 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7" containerName="ceilometer-notification-agent" Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.130572 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7" containerName="proxy-httpd" Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.137366 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.140296 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.140502 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.148191 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.239146 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9adc355f-c2d4-4184-9229-605ea979b390-run-httpd\") pod \"ceilometer-0\" (UID: \"9adc355f-c2d4-4184-9229-605ea979b390\") " pod="openstack/ceilometer-0" Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.239239 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9adc355f-c2d4-4184-9229-605ea979b390-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9adc355f-c2d4-4184-9229-605ea979b390\") " pod="openstack/ceilometer-0" Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.239295 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9adc355f-c2d4-4184-9229-605ea979b390-scripts\") pod \"ceilometer-0\" (UID: \"9adc355f-c2d4-4184-9229-605ea979b390\") " pod="openstack/ceilometer-0" Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.239359 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jljlf\" (UniqueName: \"kubernetes.io/projected/9adc355f-c2d4-4184-9229-605ea979b390-kube-api-access-jljlf\") pod \"ceilometer-0\" (UID: \"9adc355f-c2d4-4184-9229-605ea979b390\") " pod="openstack/ceilometer-0" Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.239383 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9adc355f-c2d4-4184-9229-605ea979b390-log-httpd\") pod \"ceilometer-0\" (UID: \"9adc355f-c2d4-4184-9229-605ea979b390\") " pod="openstack/ceilometer-0" Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.239409 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9adc355f-c2d4-4184-9229-605ea979b390-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9adc355f-c2d4-4184-9229-605ea979b390\") " pod="openstack/ceilometer-0" Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.239442 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9adc355f-c2d4-4184-9229-605ea979b390-config-data\") pod \"ceilometer-0\" (UID: \"9adc355f-c2d4-4184-9229-605ea979b390\") " 
pod="openstack/ceilometer-0" Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.342866 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9adc355f-c2d4-4184-9229-605ea979b390-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9adc355f-c2d4-4184-9229-605ea979b390\") " pod="openstack/ceilometer-0" Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.343002 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9adc355f-c2d4-4184-9229-605ea979b390-scripts\") pod \"ceilometer-0\" (UID: \"9adc355f-c2d4-4184-9229-605ea979b390\") " pod="openstack/ceilometer-0" Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.343100 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jljlf\" (UniqueName: \"kubernetes.io/projected/9adc355f-c2d4-4184-9229-605ea979b390-kube-api-access-jljlf\") pod \"ceilometer-0\" (UID: \"9adc355f-c2d4-4184-9229-605ea979b390\") " pod="openstack/ceilometer-0" Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.343125 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9adc355f-c2d4-4184-9229-605ea979b390-log-httpd\") pod \"ceilometer-0\" (UID: \"9adc355f-c2d4-4184-9229-605ea979b390\") " pod="openstack/ceilometer-0" Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.343155 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9adc355f-c2d4-4184-9229-605ea979b390-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9adc355f-c2d4-4184-9229-605ea979b390\") " pod="openstack/ceilometer-0" Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.343191 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9adc355f-c2d4-4184-9229-605ea979b390-config-data\") pod \"ceilometer-0\" (UID: \"9adc355f-c2d4-4184-9229-605ea979b390\") " pod="openstack/ceilometer-0" Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.343245 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9adc355f-c2d4-4184-9229-605ea979b390-run-httpd\") pod \"ceilometer-0\" (UID: \"9adc355f-c2d4-4184-9229-605ea979b390\") " pod="openstack/ceilometer-0" Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.345409 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9adc355f-c2d4-4184-9229-605ea979b390-log-httpd\") pod \"ceilometer-0\" (UID: \"9adc355f-c2d4-4184-9229-605ea979b390\") " pod="openstack/ceilometer-0" Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.345794 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9adc355f-c2d4-4184-9229-605ea979b390-run-httpd\") pod \"ceilometer-0\" (UID: \"9adc355f-c2d4-4184-9229-605ea979b390\") " pod="openstack/ceilometer-0" Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.350451 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9adc355f-c2d4-4184-9229-605ea979b390-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9adc355f-c2d4-4184-9229-605ea979b390\") " pod="openstack/ceilometer-0" Nov 25 10:09:17 crc 
kubenswrapper[4769]: I1125 10:09:17.353444 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9adc355f-c2d4-4184-9229-605ea979b390-config-data\") pod \"ceilometer-0\" (UID: \"9adc355f-c2d4-4184-9229-605ea979b390\") " pod="openstack/ceilometer-0" Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.354477 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9adc355f-c2d4-4184-9229-605ea979b390-scripts\") pod \"ceilometer-0\" (UID: \"9adc355f-c2d4-4184-9229-605ea979b390\") " pod="openstack/ceilometer-0" Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.359932 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9adc355f-c2d4-4184-9229-605ea979b390-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9adc355f-c2d4-4184-9229-605ea979b390\") " pod="openstack/ceilometer-0" Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.363001 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jljlf\" (UniqueName: \"kubernetes.io/projected/9adc355f-c2d4-4184-9229-605ea979b390-kube-api-access-jljlf\") pod \"ceilometer-0\" (UID: \"9adc355f-c2d4-4184-9229-605ea979b390\") " pod="openstack/ceilometer-0" Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.429179 4769 generic.go:334] "Generic (PLEG): container finished" podID="33335ac3-fa4e-4a44-89b7-71083bcf7930" containerID="09711f25fdaf8a7b325b3961e035985bbef939a9315d4c48e41b712279c2e69a" exitCode=0 Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.429206 4769 generic.go:334] "Generic (PLEG): container finished" podID="33335ac3-fa4e-4a44-89b7-71083bcf7930" containerID="338a8423adc3403c6e5ee2124377a3e2a9e45462d9c3952d90d353ce929ed541" exitCode=0 Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.429242 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"33335ac3-fa4e-4a44-89b7-71083bcf7930","Type":"ContainerDied","Data":"09711f25fdaf8a7b325b3961e035985bbef939a9315d4c48e41b712279c2e69a"} Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.429265 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"33335ac3-fa4e-4a44-89b7-71083bcf7930","Type":"ContainerDied","Data":"338a8423adc3403c6e5ee2124377a3e2a9e45462d9c3952d90d353ce929ed541"} Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.432770 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"44a42f95-baf9-4949-a54e-bc3eadadc774","Type":"ContainerStarted","Data":"cb46e19254772a8cd0409a33a4f7288ff20128a4c094d2efde9e70ace0eb4c42"} Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.432794 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"44a42f95-baf9-4949-a54e-bc3eadadc774","Type":"ContainerStarted","Data":"966ac7d0e7e68e1320e6ef2313fc0465c6a1b9f7a0ae6dae13fab930dca6fe92"} Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.436925 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"74f2d1fa-6292-4290-9ad8-798f3c41bf97","Type":"ContainerStarted","Data":"8d927469f2ced3c9a1b43bb1f882271590dbbfc3d602246ab1dd43aa843627a1"} Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.436956 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" 
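The "SyncLoop (PLEG)" lines above are the kubelet's pod lifecycle event generator relaying ContainerStarted/ContainerDied transitions into the sync loop. From outside the node, the same transitions can be observed through the API with a client-go watch; a minimal sketch, with kubeconfig handling and the namespace assumed rather than taken from the log:

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)
	w, err := cs.CoreV1().Pods("openstack").Watch(context.Background(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for ev := range w.ResultChan() {
		pod, ok := ev.Object.(*corev1.Pod)
		if !ok {
			continue
		}
		for _, st := range pod.Status.ContainerStatuses {
			// Running/terminated transitions here mirror the kubelet's
			// ContainerStarted / ContainerDied PLEG events above.
			fmt.Println(pod.Name, st.Name, st.State)
		}
	}
}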
event={"ID":"74f2d1fa-6292-4290-9ad8-798f3c41bf97","Type":"ContainerStarted","Data":"fd16c50f0cfb156afabfa54fbae19ce0d9e037e0c211f5fc70d6f7e7fa051a11"} Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.463328 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.494391 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.494355925 podStartE2EDuration="3.494355925s" podCreationTimestamp="2025-11-25 10:09:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:09:17.456430622 +0000 UTC m=+1506.041402925" watchObservedRunningTime="2025-11-25 10:09:17.494355925 +0000 UTC m=+1506.079328238" Nov 25 10:09:17 crc kubenswrapper[4769]: I1125 10:09:17.507191 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.507167773 podStartE2EDuration="2.507167773s" podCreationTimestamp="2025-11-25 10:09:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:09:17.50548311 +0000 UTC m=+1506.090455423" watchObservedRunningTime="2025-11-25 10:09:17.507167773 +0000 UTC m=+1506.092140086" Nov 25 10:09:18 crc kubenswrapper[4769]: I1125 10:09:18.007680 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:09:18 crc kubenswrapper[4769]: I1125 10:09:18.255120 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7" path="/var/lib/kubelet/pods/49f73e1f-e6d9-4855-8b3d-132a8c8dc7a7/volumes" Nov 25 10:09:18 crc kubenswrapper[4769]: I1125 10:09:18.469610 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9adc355f-c2d4-4184-9229-605ea979b390","Type":"ContainerStarted","Data":"166a6f53753ea368f35d94fd7166dd678ba5837c670c9cb847bef3cfad560644"} Nov 25 10:09:18 crc kubenswrapper[4769]: I1125 10:09:18.473777 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"44a42f95-baf9-4949-a54e-bc3eadadc774","Type":"ContainerStarted","Data":"9c6397bfe52edc42bffc88bb2a1b032782b7622c93584743e8c614f4c7d819b3"} Nov 25 10:09:19 crc kubenswrapper[4769]: I1125 10:09:19.493212 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9adc355f-c2d4-4184-9229-605ea979b390","Type":"ContainerStarted","Data":"c7d63e54994a1ff033095bbb8b871af10d02f583143a36c367b0e335ec6e60ec"} Nov 25 10:09:19 crc kubenswrapper[4769]: I1125 10:09:19.517440 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Nov 25 10:09:19 crc kubenswrapper[4769]: I1125 10:09:19.788127 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 25 10:09:19 crc kubenswrapper[4769]: I1125 10:09:19.792947 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.096183 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.158732 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5f8bf574-f4cc-49f2-97e4-dc87f8435ebb-logs\") pod \"5f8bf574-f4cc-49f2-97e4-dc87f8435ebb\" (UID: \"5f8bf574-f4cc-49f2-97e4-dc87f8435ebb\") " Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.158815 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f8bf574-f4cc-49f2-97e4-dc87f8435ebb-config-data\") pod \"5f8bf574-f4cc-49f2-97e4-dc87f8435ebb\" (UID: \"5f8bf574-f4cc-49f2-97e4-dc87f8435ebb\") " Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.158910 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nc6vn\" (UniqueName: \"kubernetes.io/projected/5f8bf574-f4cc-49f2-97e4-dc87f8435ebb-kube-api-access-nc6vn\") pod \"5f8bf574-f4cc-49f2-97e4-dc87f8435ebb\" (UID: \"5f8bf574-f4cc-49f2-97e4-dc87f8435ebb\") " Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.159090 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f8bf574-f4cc-49f2-97e4-dc87f8435ebb-combined-ca-bundle\") pod \"5f8bf574-f4cc-49f2-97e4-dc87f8435ebb\" (UID: \"5f8bf574-f4cc-49f2-97e4-dc87f8435ebb\") " Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.160197 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5f8bf574-f4cc-49f2-97e4-dc87f8435ebb-logs" (OuterVolumeSpecName: "logs") pod "5f8bf574-f4cc-49f2-97e4-dc87f8435ebb" (UID: "5f8bf574-f4cc-49f2-97e4-dc87f8435ebb"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.175400 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f8bf574-f4cc-49f2-97e4-dc87f8435ebb-kube-api-access-nc6vn" (OuterVolumeSpecName: "kube-api-access-nc6vn") pod "5f8bf574-f4cc-49f2-97e4-dc87f8435ebb" (UID: "5f8bf574-f4cc-49f2-97e4-dc87f8435ebb"). InnerVolumeSpecName "kube-api-access-nc6vn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.208122 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f8bf574-f4cc-49f2-97e4-dc87f8435ebb-config-data" (OuterVolumeSpecName: "config-data") pod "5f8bf574-f4cc-49f2-97e4-dc87f8435ebb" (UID: "5f8bf574-f4cc-49f2-97e4-dc87f8435ebb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.225683 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f8bf574-f4cc-49f2-97e4-dc87f8435ebb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5f8bf574-f4cc-49f2-97e4-dc87f8435ebb" (UID: "5f8bf574-f4cc-49f2-97e4-dc87f8435ebb"). InnerVolumeSpecName "combined-ca-bundle". 
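The pod_startup_latency_tracker lines above report podStartE2EDuration as, in effect, the time from pod creation to the observed running state. A rough way to recompute a comparable number from the API side; the field names are real client-go/API types, but the calculation is only an approximation of the tracker's logic:

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)
	pod, err := cs.CoreV1().Pods("openstack").Get(context.Background(), "nova-metadata-0", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	for _, c := range pod.Status.Conditions {
		if c.Type == corev1.PodReady && c.Status == corev1.ConditionTrue {
			// Ready transition minus creation time approximates the
			// podStartE2EDuration values logged above.
			fmt.Println(c.LastTransitionTime.Sub(pod.CreationTimestamp.Time))
		}
	}
}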
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.271203 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nc6vn\" (UniqueName: \"kubernetes.io/projected/5f8bf574-f4cc-49f2-97e4-dc87f8435ebb-kube-api-access-nc6vn\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.271235 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f8bf574-f4cc-49f2-97e4-dc87f8435ebb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.271246 4769 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5f8bf574-f4cc-49f2-97e4-dc87f8435ebb-logs\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.271263 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f8bf574-f4cc-49f2-97e4-dc87f8435ebb-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.512263 4769 generic.go:334] "Generic (PLEG): container finished" podID="5f8bf574-f4cc-49f2-97e4-dc87f8435ebb" containerID="45017a98e570a049b2577793492b1ad8408d4c66ce111ec5bb5f67ad533d31cf" exitCode=0 Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.512334 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.512341 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5f8bf574-f4cc-49f2-97e4-dc87f8435ebb","Type":"ContainerDied","Data":"45017a98e570a049b2577793492b1ad8408d4c66ce111ec5bb5f67ad533d31cf"} Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.513162 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5f8bf574-f4cc-49f2-97e4-dc87f8435ebb","Type":"ContainerDied","Data":"037e64df66e613b907428caa18414557cedbf38f827be07eb14158b1b36d774c"} Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.513188 4769 scope.go:117] "RemoveContainer" containerID="45017a98e570a049b2577793492b1ad8408d4c66ce111ec5bb5f67ad533d31cf" Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.526037 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9adc355f-c2d4-4184-9229-605ea979b390","Type":"ContainerStarted","Data":"6b296e2431a9e81d4f0a319547bf50978cc6864f0f03fdd4428f8cad0d4aefed"} Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.552693 4769 scope.go:117] "RemoveContainer" containerID="0618e911ba8be8f596aaa22da2ef18ce65ec658e44e1993a2a089680884d3d07" Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.553029 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.563002 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.583085 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 25 10:09:20 crc kubenswrapper[4769]: E1125 10:09:20.583555 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f8bf574-f4cc-49f2-97e4-dc87f8435ebb" containerName="nova-api-log" Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.583575 4769 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="5f8bf574-f4cc-49f2-97e4-dc87f8435ebb" containerName="nova-api-log" Nov 25 10:09:20 crc kubenswrapper[4769]: E1125 10:09:20.583618 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f8bf574-f4cc-49f2-97e4-dc87f8435ebb" containerName="nova-api-api" Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.583625 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f8bf574-f4cc-49f2-97e4-dc87f8435ebb" containerName="nova-api-api" Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.583856 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f8bf574-f4cc-49f2-97e4-dc87f8435ebb" containerName="nova-api-api" Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.583895 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f8bf574-f4cc-49f2-97e4-dc87f8435ebb" containerName="nova-api-log" Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.585153 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.586299 4769 scope.go:117] "RemoveContainer" containerID="45017a98e570a049b2577793492b1ad8408d4c66ce111ec5bb5f67ad533d31cf" Nov 25 10:09:20 crc kubenswrapper[4769]: E1125 10:09:20.587012 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"45017a98e570a049b2577793492b1ad8408d4c66ce111ec5bb5f67ad533d31cf\": container with ID starting with 45017a98e570a049b2577793492b1ad8408d4c66ce111ec5bb5f67ad533d31cf not found: ID does not exist" containerID="45017a98e570a049b2577793492b1ad8408d4c66ce111ec5bb5f67ad533d31cf" Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.587046 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"45017a98e570a049b2577793492b1ad8408d4c66ce111ec5bb5f67ad533d31cf"} err="failed to get container status \"45017a98e570a049b2577793492b1ad8408d4c66ce111ec5bb5f67ad533d31cf\": rpc error: code = NotFound desc = could not find container \"45017a98e570a049b2577793492b1ad8408d4c66ce111ec5bb5f67ad533d31cf\": container with ID starting with 45017a98e570a049b2577793492b1ad8408d4c66ce111ec5bb5f67ad533d31cf not found: ID does not exist" Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.587075 4769 scope.go:117] "RemoveContainer" containerID="0618e911ba8be8f596aaa22da2ef18ce65ec658e44e1993a2a089680884d3d07" Nov 25 10:09:20 crc kubenswrapper[4769]: E1125 10:09:20.593808 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0618e911ba8be8f596aaa22da2ef18ce65ec658e44e1993a2a089680884d3d07\": container with ID starting with 0618e911ba8be8f596aaa22da2ef18ce65ec658e44e1993a2a089680884d3d07 not found: ID does not exist" containerID="0618e911ba8be8f596aaa22da2ef18ce65ec658e44e1993a2a089680884d3d07" Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.593868 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0618e911ba8be8f596aaa22da2ef18ce65ec658e44e1993a2a089680884d3d07"} err="failed to get container status \"0618e911ba8be8f596aaa22da2ef18ce65ec658e44e1993a2a089680884d3d07\": rpc error: code = NotFound desc = could not find container \"0618e911ba8be8f596aaa22da2ef18ce65ec658e44e1993a2a089680884d3d07\": container with ID starting with 0618e911ba8be8f596aaa22da2ef18ce65ec658e44e1993a2a089680884d3d07 not found: ID does not exist" Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 
Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.617616 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.686997 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/738cf132-f225-43cb-a32d-b383bf9d4138-config-data\") pod \"nova-api-0\" (UID: \"738cf132-f225-43cb-a32d-b383bf9d4138\") " pod="openstack/nova-api-0"
Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.687400 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/738cf132-f225-43cb-a32d-b383bf9d4138-logs\") pod \"nova-api-0\" (UID: \"738cf132-f225-43cb-a32d-b383bf9d4138\") " pod="openstack/nova-api-0"
Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.687642 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sjtcv\" (UniqueName: \"kubernetes.io/projected/738cf132-f225-43cb-a32d-b383bf9d4138-kube-api-access-sjtcv\") pod \"nova-api-0\" (UID: \"738cf132-f225-43cb-a32d-b383bf9d4138\") " pod="openstack/nova-api-0"
Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.687674 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/738cf132-f225-43cb-a32d-b383bf9d4138-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"738cf132-f225-43cb-a32d-b383bf9d4138\") " pod="openstack/nova-api-0"
Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.774214 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.790593 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/738cf132-f225-43cb-a32d-b383bf9d4138-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"738cf132-f225-43cb-a32d-b383bf9d4138\") " pod="openstack/nova-api-0"
Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.790662 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sjtcv\" (UniqueName: \"kubernetes.io/projected/738cf132-f225-43cb-a32d-b383bf9d4138-kube-api-access-sjtcv\") pod \"nova-api-0\" (UID: \"738cf132-f225-43cb-a32d-b383bf9d4138\") " pod="openstack/nova-api-0"
Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.790814 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/738cf132-f225-43cb-a32d-b383bf9d4138-config-data\") pod \"nova-api-0\" (UID: \"738cf132-f225-43cb-a32d-b383bf9d4138\") " pod="openstack/nova-api-0"
Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.791053 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/738cf132-f225-43cb-a32d-b383bf9d4138-logs\") pod \"nova-api-0\" (UID: \"738cf132-f225-43cb-a32d-b383bf9d4138\") " pod="openstack/nova-api-0"
Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.791730 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/738cf132-f225-43cb-a32d-b383bf9d4138-logs\") pod \"nova-api-0\" (UID: \"738cf132-f225-43cb-a32d-b383bf9d4138\") " pod="openstack/nova-api-0"
Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.796401 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/738cf132-f225-43cb-a32d-b383bf9d4138-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"738cf132-f225-43cb-a32d-b383bf9d4138\") " pod="openstack/nova-api-0"
Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.797520 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/738cf132-f225-43cb-a32d-b383bf9d4138-config-data\") pod \"nova-api-0\" (UID: \"738cf132-f225-43cb-a32d-b383bf9d4138\") " pod="openstack/nova-api-0"
Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.810771 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sjtcv\" (UniqueName: \"kubernetes.io/projected/738cf132-f225-43cb-a32d-b383bf9d4138-kube-api-access-sjtcv\") pod \"nova-api-0\" (UID: \"738cf132-f225-43cb-a32d-b383bf9d4138\") " pod="openstack/nova-api-0"
Nov 25 10:09:20 crc kubenswrapper[4769]: I1125 10:09:20.907183 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 25 10:09:21 crc kubenswrapper[4769]: I1125 10:09:21.457297 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 25 10:09:21 crc kubenswrapper[4769]: W1125 10:09:21.466361 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod738cf132_f225_43cb_a32d_b383bf9d4138.slice/crio-fe5a52dcf6fc0e1420613c0dd6f5c78c75231b245b10ff5eeb0569547d8661e0 WatchSource:0}: Error finding container fe5a52dcf6fc0e1420613c0dd6f5c78c75231b245b10ff5eeb0569547d8661e0: Status 404 returned error can't find the container with id fe5a52dcf6fc0e1420613c0dd6f5c78c75231b245b10ff5eeb0569547d8661e0
Nov 25 10:09:21 crc kubenswrapper[4769]: I1125 10:09:21.549713 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"738cf132-f225-43cb-a32d-b383bf9d4138","Type":"ContainerStarted","Data":"fe5a52dcf6fc0e1420613c0dd6f5c78c75231b245b10ff5eeb0569547d8661e0"}
Nov 25 10:09:21 crc kubenswrapper[4769]: I1125 10:09:21.560443 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9adc355f-c2d4-4184-9229-605ea979b390","Type":"ContainerStarted","Data":"e0a1837b4686bc2a27481550ccdc9b55b2ee1a45f5e6118a49f32dc1670c8bda"}
Nov 25 10:09:22 crc kubenswrapper[4769]: I1125 10:09:22.259012 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5f8bf574-f4cc-49f2-97e4-dc87f8435ebb" path="/var/lib/kubelet/pods/5f8bf574-f4cc-49f2-97e4-dc87f8435ebb/volumes"
Nov 25 10:09:22 crc kubenswrapper[4769]: I1125 10:09:22.290310 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 10:09:22 crc kubenswrapper[4769]: I1125 10:09:22.290396 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 10:09:22 crc kubenswrapper[4769]: I1125 10:09:22.577539 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9adc355f-c2d4-4184-9229-605ea979b390","Type":"ContainerStarted","Data":"050873cbdad1aea15009eec8f4f2cbbe3a4423d7a7c87f985c2bc7a0c096c0d6"}
Nov 25 10:09:22 crc kubenswrapper[4769]: I1125 10:09:22.580130 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 25 10:09:22 crc kubenswrapper[4769]: I1125 10:09:22.588079 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"738cf132-f225-43cb-a32d-b383bf9d4138","Type":"ContainerStarted","Data":"d3cce1ed884724eeee4a4b668d4c1542fcaf7485d74d7903449041f9d26ce955"}
Nov 25 10:09:22 crc kubenswrapper[4769]: I1125 10:09:22.588117 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"738cf132-f225-43cb-a32d-b383bf9d4138","Type":"ContainerStarted","Data":"2fc2269802b6f448bb00cdcbb1e351d8ad8e6031f3cab54f99594cf9da7d7771"}
Nov 25 10:09:22 crc kubenswrapper[4769]: I1125 10:09:22.637912 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.7418034310000001 podStartE2EDuration="5.637876957s" podCreationTimestamp="2025-11-25 10:09:17 +0000 UTC" firstStartedPulling="2025-11-25 10:09:18.023843532 +0000 UTC m=+1506.608815835" lastFinishedPulling="2025-11-25 10:09:21.919917048 +0000 UTC m=+1510.504889361" observedRunningTime="2025-11-25 10:09:22.615954365 +0000 UTC m=+1511.200926698" watchObservedRunningTime="2025-11-25 10:09:22.637876957 +0000 UTC m=+1511.222849280"
Nov 25 10:09:22 crc kubenswrapper[4769]: I1125 10:09:22.653445 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.653416186 podStartE2EDuration="2.653416186s" podCreationTimestamp="2025-11-25 10:09:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:09:22.639853178 +0000 UTC m=+1511.224825501" watchObservedRunningTime="2025-11-25 10:09:22.653416186 +0000 UTC m=+1511.238388499"
Nov 25 10:09:24 crc kubenswrapper[4769]: I1125 10:09:24.787574 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Nov 25 10:09:24 crc kubenswrapper[4769]: I1125 10:09:24.788557 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Nov 25 10:09:25 crc kubenswrapper[4769]: I1125 10:09:25.776793 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Nov 25 10:09:25 crc kubenswrapper[4769]: I1125 10:09:25.804075 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="44a42f95-baf9-4949-a54e-bc3eadadc774" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.248:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 25 10:09:25 crc kubenswrapper[4769]: I1125 10:09:25.804515 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="44a42f95-baf9-4949-a54e-bc3eadadc774" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.248:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 25 10:09:25 crc kubenswrapper[4769]: I1125 10:09:25.832911 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Nov 25 10:09:26 crc kubenswrapper[4769]: I1125 10:09:26.754158 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Nov 25 10:09:30 crc kubenswrapper[4769]: I1125 10:09:30.718796 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"27485288-1c80-435a-a272-592d87c79882","Type":"ContainerDied","Data":"f3fbf9236d34cdc902d87415a559cfec72b39ea8552cf1fba968a651fc672986"}
Nov 25 10:09:30 crc kubenswrapper[4769]: I1125 10:09:30.718844 4769 generic.go:334] "Generic (PLEG): container finished" podID="27485288-1c80-435a-a272-592d87c79882" containerID="f3fbf9236d34cdc902d87415a559cfec72b39ea8552cf1fba968a651fc672986" exitCode=137
Nov 25 10:09:30 crc kubenswrapper[4769]: I1125 10:09:30.907468 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 25 10:09:30 crc kubenswrapper[4769]: I1125 10:09:30.907877 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 25 10:09:31 crc kubenswrapper[4769]: I1125 10:09:31.205163 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 25 10:09:31 crc kubenswrapper[4769]: I1125 10:09:31.347836 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27485288-1c80-435a-a272-592d87c79882-combined-ca-bundle\") pod \"27485288-1c80-435a-a272-592d87c79882\" (UID: \"27485288-1c80-435a-a272-592d87c79882\") "
Nov 25 10:09:31 crc kubenswrapper[4769]: I1125 10:09:31.348304 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27485288-1c80-435a-a272-592d87c79882-config-data\") pod \"27485288-1c80-435a-a272-592d87c79882\" (UID: \"27485288-1c80-435a-a272-592d87c79882\") "
Nov 25 10:09:31 crc kubenswrapper[4769]: I1125 10:09:31.348566 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gcclx\" (UniqueName: \"kubernetes.io/projected/27485288-1c80-435a-a272-592d87c79882-kube-api-access-gcclx\") pod \"27485288-1c80-435a-a272-592d87c79882\" (UID: \"27485288-1c80-435a-a272-592d87c79882\") "
Nov 25 10:09:31 crc kubenswrapper[4769]: I1125 10:09:31.369123 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/27485288-1c80-435a-a272-592d87c79882-kube-api-access-gcclx" (OuterVolumeSpecName: "kube-api-access-gcclx") pod "27485288-1c80-435a-a272-592d87c79882" (UID: "27485288-1c80-435a-a272-592d87c79882"). InnerVolumeSpecName "kube-api-access-gcclx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:09:31 crc kubenswrapper[4769]: I1125 10:09:31.412080 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27485288-1c80-435a-a272-592d87c79882-config-data" (OuterVolumeSpecName: "config-data") pod "27485288-1c80-435a-a272-592d87c79882" (UID: "27485288-1c80-435a-a272-592d87c79882"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
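The probe lines above show the normal settling pattern for a slow-starting API: the startup probe reports "unhealthy" a few times (the HTTP GET times out, hence "Client.Timeout exceeded" in the failure output), then flips to "started", after which the readiness probe takes over and eventually reports "ready". A sketch of a probe pair that behaves this way; the path and port come from the nova-api failure output above, while the timeout and threshold values are purely illustrative:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// GET / on the nova-api port, as in the logged probe output.
	get := &corev1.HTTPGetAction{Path: "/", Port: intstr.FromInt(8774)}
	startup := &corev1.Probe{
		ProbeHandler:     corev1.ProbeHandler{HTTPGet: get},
		TimeoutSeconds:   3,  // exceeded -> "Client.Timeout exceeded" as logged
		PeriodSeconds:    5,
		FailureThreshold: 12, // tolerate a slow first start before killing
	}
	readiness := &corev1.Probe{
		ProbeHandler:   corev1.ProbeHandler{HTTPGet: get},
		TimeoutSeconds: 3,
		PeriodSeconds:  5,
	}
	fmt.Println(startup, readiness)
}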
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:09:31 crc kubenswrapper[4769]: I1125 10:09:31.414993 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27485288-1c80-435a-a272-592d87c79882-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "27485288-1c80-435a-a272-592d87c79882" (UID: "27485288-1c80-435a-a272-592d87c79882"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:09:31 crc kubenswrapper[4769]: I1125 10:09:31.452413 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27485288-1c80-435a-a272-592d87c79882-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:31 crc kubenswrapper[4769]: I1125 10:09:31.452448 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gcclx\" (UniqueName: \"kubernetes.io/projected/27485288-1c80-435a-a272-592d87c79882-kube-api-access-gcclx\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:31 crc kubenswrapper[4769]: I1125 10:09:31.452460 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27485288-1c80-435a-a272-592d87c79882-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:31 crc kubenswrapper[4769]: I1125 10:09:31.770746 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"27485288-1c80-435a-a272-592d87c79882","Type":"ContainerDied","Data":"a4ec4a6939c8c09ee2895e82167711acca8d5ef529242c4adf4874d8e243e0f3"} Nov 25 10:09:31 crc kubenswrapper[4769]: I1125 10:09:31.770811 4769 scope.go:117] "RemoveContainer" containerID="f3fbf9236d34cdc902d87415a559cfec72b39ea8552cf1fba968a651fc672986" Nov 25 10:09:31 crc kubenswrapper[4769]: I1125 10:09:31.771038 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:09:31 crc kubenswrapper[4769]: I1125 10:09:31.819700 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 10:09:31 crc kubenswrapper[4769]: I1125 10:09:31.843239 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 10:09:31 crc kubenswrapper[4769]: I1125 10:09:31.857222 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 10:09:31 crc kubenswrapper[4769]: E1125 10:09:31.857918 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27485288-1c80-435a-a272-592d87c79882" containerName="nova-cell1-novncproxy-novncproxy" Nov 25 10:09:31 crc kubenswrapper[4769]: I1125 10:09:31.857942 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="27485288-1c80-435a-a272-592d87c79882" containerName="nova-cell1-novncproxy-novncproxy" Nov 25 10:09:31 crc kubenswrapper[4769]: I1125 10:09:31.858230 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="27485288-1c80-435a-a272-592d87c79882" containerName="nova-cell1-novncproxy-novncproxy" Nov 25 10:09:31 crc kubenswrapper[4769]: I1125 10:09:31.860959 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:09:31 crc kubenswrapper[4769]: I1125 10:09:31.868759 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 10:09:31 crc kubenswrapper[4769]: I1125 10:09:31.872449 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 25 10:09:31 crc kubenswrapper[4769]: I1125 10:09:31.872730 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Nov 25 10:09:31 crc kubenswrapper[4769]: I1125 10:09:31.873403 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Nov 25 10:09:31 crc kubenswrapper[4769]: I1125 10:09:31.973710 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4eb5499-66c0-466e-92ab-1f2a223d1f35-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"d4eb5499-66c0-466e-92ab-1f2a223d1f35\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:09:31 crc kubenswrapper[4769]: I1125 10:09:31.973844 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/d4eb5499-66c0-466e-92ab-1f2a223d1f35-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"d4eb5499-66c0-466e-92ab-1f2a223d1f35\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:09:31 crc kubenswrapper[4769]: I1125 10:09:31.973879 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/d4eb5499-66c0-466e-92ab-1f2a223d1f35-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"d4eb5499-66c0-466e-92ab-1f2a223d1f35\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:09:31 crc kubenswrapper[4769]: I1125 10:09:31.974297 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4eb5499-66c0-466e-92ab-1f2a223d1f35-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"d4eb5499-66c0-466e-92ab-1f2a223d1f35\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:09:31 crc kubenswrapper[4769]: I1125 10:09:31.974558 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mntj4\" (UniqueName: \"kubernetes.io/projected/d4eb5499-66c0-466e-92ab-1f2a223d1f35-kube-api-access-mntj4\") pod \"nova-cell1-novncproxy-0\" (UID: \"d4eb5499-66c0-466e-92ab-1f2a223d1f35\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:09:32 crc kubenswrapper[4769]: I1125 10:09:32.006331 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="738cf132-f225-43cb-a32d-b383bf9d4138" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.251:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 10:09:32 crc kubenswrapper[4769]: I1125 10:09:32.006642 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="738cf132-f225-43cb-a32d-b383bf9d4138" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.251:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 10:09:32 crc kubenswrapper[4769]: 
Nov 25 10:09:32 crc kubenswrapper[4769]: I1125 10:09:32.077399 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mntj4\" (UniqueName: \"kubernetes.io/projected/d4eb5499-66c0-466e-92ab-1f2a223d1f35-kube-api-access-mntj4\") pod \"nova-cell1-novncproxy-0\" (UID: \"d4eb5499-66c0-466e-92ab-1f2a223d1f35\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 25 10:09:32 crc kubenswrapper[4769]: I1125 10:09:32.077666 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4eb5499-66c0-466e-92ab-1f2a223d1f35-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"d4eb5499-66c0-466e-92ab-1f2a223d1f35\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 25 10:09:32 crc kubenswrapper[4769]: I1125 10:09:32.077713 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/d4eb5499-66c0-466e-92ab-1f2a223d1f35-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"d4eb5499-66c0-466e-92ab-1f2a223d1f35\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 25 10:09:32 crc kubenswrapper[4769]: I1125 10:09:32.077744 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/d4eb5499-66c0-466e-92ab-1f2a223d1f35-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"d4eb5499-66c0-466e-92ab-1f2a223d1f35\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 25 10:09:32 crc kubenswrapper[4769]: I1125 10:09:32.077842 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4eb5499-66c0-466e-92ab-1f2a223d1f35-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"d4eb5499-66c0-466e-92ab-1f2a223d1f35\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 25 10:09:32 crc kubenswrapper[4769]: I1125 10:09:32.084784 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4eb5499-66c0-466e-92ab-1f2a223d1f35-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"d4eb5499-66c0-466e-92ab-1f2a223d1f35\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 25 10:09:32 crc kubenswrapper[4769]: I1125 10:09:32.086064 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/d4eb5499-66c0-466e-92ab-1f2a223d1f35-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"d4eb5499-66c0-466e-92ab-1f2a223d1f35\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 25 10:09:32 crc kubenswrapper[4769]: I1125 10:09:32.093423 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/d4eb5499-66c0-466e-92ab-1f2a223d1f35-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"d4eb5499-66c0-466e-92ab-1f2a223d1f35\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 25 10:09:32 crc kubenswrapper[4769]: I1125 10:09:32.110009 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4eb5499-66c0-466e-92ab-1f2a223d1f35-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"d4eb5499-66c0-466e-92ab-1f2a223d1f35\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 25 10:09:32 crc kubenswrapper[4769]: I1125 10:09:32.120638 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mntj4\" (UniqueName: \"kubernetes.io/projected/d4eb5499-66c0-466e-92ab-1f2a223d1f35-kube-api-access-mntj4\") pod \"nova-cell1-novncproxy-0\" (UID: \"d4eb5499-66c0-466e-92ab-1f2a223d1f35\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 25 10:09:32 crc kubenswrapper[4769]: I1125 10:09:32.185301 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 25 10:09:32 crc kubenswrapper[4769]: I1125 10:09:32.252045 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="27485288-1c80-435a-a272-592d87c79882" path="/var/lib/kubelet/pods/27485288-1c80-435a-a272-592d87c79882/volumes"
Nov 25 10:09:32 crc kubenswrapper[4769]: I1125 10:09:32.704521 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 25 10:09:32 crc kubenswrapper[4769]: I1125 10:09:32.797798 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"d4eb5499-66c0-466e-92ab-1f2a223d1f35","Type":"ContainerStarted","Data":"00009293569fb75dc41bc62bcfe76a9c7abc5f14ab48c0fa70097771e0e7b102"}
Nov 25 10:09:33 crc kubenswrapper[4769]: I1125 10:09:33.822801 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"d4eb5499-66c0-466e-92ab-1f2a223d1f35","Type":"ContainerStarted","Data":"2782542f164a5a09ca6cd291298118b41ad256bf1400f770000d581153183fe1"}
Nov 25 10:09:33 crc kubenswrapper[4769]: I1125 10:09:33.852952 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.8529227390000003 podStartE2EDuration="2.852922739s" podCreationTimestamp="2025-11-25 10:09:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:09:33.845753675 +0000 UTC m=+1522.430726028" watchObservedRunningTime="2025-11-25 10:09:33.852922739 +0000 UTC m=+1522.437895082"
Nov 25 10:09:34 crc kubenswrapper[4769]: I1125 10:09:34.799297 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Nov 25 10:09:34 crc kubenswrapper[4769]: I1125 10:09:34.807844 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Nov 25 10:09:34 crc kubenswrapper[4769]: I1125 10:09:34.809993 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Nov 25 10:09:34 crc kubenswrapper[4769]: I1125 10:09:34.853756 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Nov 25 10:09:37 crc kubenswrapper[4769]: I1125 10:09:37.186206 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0"
Nov 25 10:09:40 crc kubenswrapper[4769]: I1125 10:09:40.911995 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Nov 25 10:09:40 crc kubenswrapper[4769]: I1125 10:09:40.912880 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Nov 25 10:09:40 crc kubenswrapper[4769]: I1125 10:09:40.913533 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Nov 25 10:09:40 crc kubenswrapper[4769]: I1125 10:09:40.913584 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Nov 25 10:09:40 crc kubenswrapper[4769]: I1125 10:09:40.915957 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Nov 25 10:09:40 crc kubenswrapper[4769]: I1125 10:09:40.918640 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Nov 25 10:09:41 crc kubenswrapper[4769]: I1125 10:09:41.204926 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-bt68p"]
Nov 25 10:09:41 crc kubenswrapper[4769]: I1125 10:09:41.207427 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79b5d74c8c-bt68p"
Nov 25 10:09:41 crc kubenswrapper[4769]: I1125 10:09:41.217529 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-bt68p"]
Nov 25 10:09:41 crc kubenswrapper[4769]: I1125 10:09:41.427348 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d52e129a-837d-4574-aa08-69857eb9109f-config\") pod \"dnsmasq-dns-79b5d74c8c-bt68p\" (UID: \"d52e129a-837d-4574-aa08-69857eb9109f\") " pod="openstack/dnsmasq-dns-79b5d74c8c-bt68p"
Nov 25 10:09:41 crc kubenswrapper[4769]: I1125 10:09:41.427429 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7x4lv\" (UniqueName: \"kubernetes.io/projected/d52e129a-837d-4574-aa08-69857eb9109f-kube-api-access-7x4lv\") pod \"dnsmasq-dns-79b5d74c8c-bt68p\" (UID: \"d52e129a-837d-4574-aa08-69857eb9109f\") " pod="openstack/dnsmasq-dns-79b5d74c8c-bt68p"
Nov 25 10:09:41 crc kubenswrapper[4769]: I1125 10:09:41.427702 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d52e129a-837d-4574-aa08-69857eb9109f-dns-swift-storage-0\") pod \"dnsmasq-dns-79b5d74c8c-bt68p\" (UID: \"d52e129a-837d-4574-aa08-69857eb9109f\") " pod="openstack/dnsmasq-dns-79b5d74c8c-bt68p"
Nov 25 10:09:41 crc kubenswrapper[4769]: I1125 10:09:41.428192 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d52e129a-837d-4574-aa08-69857eb9109f-ovsdbserver-sb\") pod \"dnsmasq-dns-79b5d74c8c-bt68p\" (UID: \"d52e129a-837d-4574-aa08-69857eb9109f\") " pod="openstack/dnsmasq-dns-79b5d74c8c-bt68p"
Nov 25 10:09:41 crc kubenswrapper[4769]: I1125 10:09:41.428246 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d52e129a-837d-4574-aa08-69857eb9109f-ovsdbserver-nb\") pod \"dnsmasq-dns-79b5d74c8c-bt68p\" (UID: \"d52e129a-837d-4574-aa08-69857eb9109f\") " pod="openstack/dnsmasq-dns-79b5d74c8c-bt68p"
Nov 25 10:09:41 crc kubenswrapper[4769]: I1125 10:09:41.428270 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d52e129a-837d-4574-aa08-69857eb9109f-dns-svc\") pod \"dnsmasq-dns-79b5d74c8c-bt68p\" (UID: \"d52e129a-837d-4574-aa08-69857eb9109f\") " pod="openstack/dnsmasq-dns-79b5d74c8c-bt68p"
Nov 25 10:09:41 crc kubenswrapper[4769]: I1125 10:09:41.532002 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d52e129a-837d-4574-aa08-69857eb9109f-dns-swift-storage-0\") pod \"dnsmasq-dns-79b5d74c8c-bt68p\" (UID: \"d52e129a-837d-4574-aa08-69857eb9109f\") " pod="openstack/dnsmasq-dns-79b5d74c8c-bt68p"
Nov 25 10:09:41 crc kubenswrapper[4769]: I1125 10:09:41.532317 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d52e129a-837d-4574-aa08-69857eb9109f-ovsdbserver-sb\") pod \"dnsmasq-dns-79b5d74c8c-bt68p\" (UID: \"d52e129a-837d-4574-aa08-69857eb9109f\") " pod="openstack/dnsmasq-dns-79b5d74c8c-bt68p"
Nov 25 10:09:41 crc kubenswrapper[4769]: I1125 10:09:41.532355 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d52e129a-837d-4574-aa08-69857eb9109f-dns-svc\") pod \"dnsmasq-dns-79b5d74c8c-bt68p\" (UID: \"d52e129a-837d-4574-aa08-69857eb9109f\") " pod="openstack/dnsmasq-dns-79b5d74c8c-bt68p"
Nov 25 10:09:41 crc kubenswrapper[4769]: I1125 10:09:41.532397 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d52e129a-837d-4574-aa08-69857eb9109f-ovsdbserver-nb\") pod \"dnsmasq-dns-79b5d74c8c-bt68p\" (UID: \"d52e129a-837d-4574-aa08-69857eb9109f\") " pod="openstack/dnsmasq-dns-79b5d74c8c-bt68p"
Nov 25 10:09:41 crc kubenswrapper[4769]: I1125 10:09:41.532484 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d52e129a-837d-4574-aa08-69857eb9109f-config\") pod \"dnsmasq-dns-79b5d74c8c-bt68p\" (UID: \"d52e129a-837d-4574-aa08-69857eb9109f\") " pod="openstack/dnsmasq-dns-79b5d74c8c-bt68p"
Nov 25 10:09:41 crc kubenswrapper[4769]: I1125 10:09:41.532595 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7x4lv\" (UniqueName: \"kubernetes.io/projected/d52e129a-837d-4574-aa08-69857eb9109f-kube-api-access-7x4lv\") pod \"dnsmasq-dns-79b5d74c8c-bt68p\" (UID: \"d52e129a-837d-4574-aa08-69857eb9109f\") " pod="openstack/dnsmasq-dns-79b5d74c8c-bt68p"
Nov 25 10:09:41 crc kubenswrapper[4769]: I1125 10:09:41.533187 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d52e129a-837d-4574-aa08-69857eb9109f-dns-swift-storage-0\") pod \"dnsmasq-dns-79b5d74c8c-bt68p\" (UID: \"d52e129a-837d-4574-aa08-69857eb9109f\") " pod="openstack/dnsmasq-dns-79b5d74c8c-bt68p"
Nov 25 10:09:41 crc kubenswrapper[4769]: I1125 10:09:41.533379 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d52e129a-837d-4574-aa08-69857eb9109f-ovsdbserver-sb\") pod \"dnsmasq-dns-79b5d74c8c-bt68p\" (UID: \"d52e129a-837d-4574-aa08-69857eb9109f\") " pod="openstack/dnsmasq-dns-79b5d74c8c-bt68p"
Nov 25 10:09:41 crc kubenswrapper[4769]: I1125 10:09:41.533740 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d52e129a-837d-4574-aa08-69857eb9109f-ovsdbserver-nb\") pod \"dnsmasq-dns-79b5d74c8c-bt68p\" (UID: \"d52e129a-837d-4574-aa08-69857eb9109f\") " pod="openstack/dnsmasq-dns-79b5d74c8c-bt68p"
Nov 25 10:09:41 crc kubenswrapper[4769]: I1125 10:09:41.533774 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d52e129a-837d-4574-aa08-69857eb9109f-config\") pod \"dnsmasq-dns-79b5d74c8c-bt68p\" (UID: \"d52e129a-837d-4574-aa08-69857eb9109f\") " pod="openstack/dnsmasq-dns-79b5d74c8c-bt68p"
Nov 25 10:09:41 crc kubenswrapper[4769]: I1125 10:09:41.534712 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d52e129a-837d-4574-aa08-69857eb9109f-dns-svc\") pod \"dnsmasq-dns-79b5d74c8c-bt68p\" (UID: \"d52e129a-837d-4574-aa08-69857eb9109f\") " pod="openstack/dnsmasq-dns-79b5d74c8c-bt68p"
Nov 25 10:09:41 crc kubenswrapper[4769]: I1125 10:09:41.564591 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7x4lv\" (UniqueName: \"kubernetes.io/projected/d52e129a-837d-4574-aa08-69857eb9109f-kube-api-access-7x4lv\") pod \"dnsmasq-dns-79b5d74c8c-bt68p\" (UID: \"d52e129a-837d-4574-aa08-69857eb9109f\") " pod="openstack/dnsmasq-dns-79b5d74c8c-bt68p"
Nov 25 10:09:41 crc kubenswrapper[4769]: I1125 10:09:41.843981 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79b5d74c8c-bt68p"
Nov 25 10:09:42 crc kubenswrapper[4769]: I1125 10:09:42.186329 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0"
Nov 25 10:09:42 crc kubenswrapper[4769]: I1125 10:09:42.214381 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0"
Nov 25 10:09:42 crc kubenswrapper[4769]: I1125 10:09:42.457193 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-bt68p"]
Nov 25 10:09:43 crc kubenswrapper[4769]: I1125 10:09:43.007256 4769 generic.go:334] "Generic (PLEG): container finished" podID="d52e129a-837d-4574-aa08-69857eb9109f" containerID="6e129b0ee0b673a6111cbe4d8de6d7f20c479802392271c2dd26afc6eb73ad49" exitCode=0
Nov 25 10:09:43 crc kubenswrapper[4769]: I1125 10:09:43.007439 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b5d74c8c-bt68p" event={"ID":"d52e129a-837d-4574-aa08-69857eb9109f","Type":"ContainerDied","Data":"6e129b0ee0b673a6111cbe4d8de6d7f20c479802392271c2dd26afc6eb73ad49"}
Nov 25 10:09:43 crc kubenswrapper[4769]: I1125 10:09:43.007574 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b5d74c8c-bt68p" event={"ID":"d52e129a-837d-4574-aa08-69857eb9109f","Type":"ContainerStarted","Data":"163897805c6c3c0b952a92162e13cb26b790b874731cc3be233f5c9de1507090"}
Nov 25 10:09:43 crc kubenswrapper[4769]: I1125 10:09:43.382598 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0"
Nov 25 10:09:43 crc kubenswrapper[4769]: I1125 10:09:43.683547 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-qh9h6"]
Nov 25 10:09:43 crc kubenswrapper[4769]: I1125 10:09:43.686877 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-qh9h6"
Nov 25 10:09:43 crc kubenswrapper[4769]: I1125 10:09:43.690145 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data"
Nov 25 10:09:43 crc kubenswrapper[4769]: I1125 10:09:43.691358 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts"
Nov 25 10:09:43 crc kubenswrapper[4769]: I1125 10:09:43.706863 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-qh9h6"]
Nov 25 10:09:43 crc kubenswrapper[4769]: I1125 10:09:43.805800 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158-config-data\") pod \"nova-cell1-cell-mapping-qh9h6\" (UID: \"28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158\") " pod="openstack/nova-cell1-cell-mapping-qh9h6"
Nov 25 10:09:43 crc kubenswrapper[4769]: I1125 10:09:43.806072 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-qh9h6\" (UID: \"28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158\") " pod="openstack/nova-cell1-cell-mapping-qh9h6"
Nov 25 10:09:43 crc kubenswrapper[4769]: I1125 10:09:43.806100 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158-scripts\") pod \"nova-cell1-cell-mapping-qh9h6\" (UID: \"28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158\") " pod="openstack/nova-cell1-cell-mapping-qh9h6"
Nov 25 10:09:43 crc kubenswrapper[4769]: I1125 10:09:43.806136 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j5gtr\" (UniqueName: \"kubernetes.io/projected/28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158-kube-api-access-j5gtr\") pod \"nova-cell1-cell-mapping-qh9h6\" (UID: \"28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158\") " pod="openstack/nova-cell1-cell-mapping-qh9h6"
Nov 25 10:09:43 crc kubenswrapper[4769]: I1125 10:09:43.906543 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 25 10:09:43 crc kubenswrapper[4769]: I1125 10:09:43.907388 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="738cf132-f225-43cb-a32d-b383bf9d4138" containerName="nova-api-api" containerID="cri-o://d3cce1ed884724eeee4a4b668d4c1542fcaf7485d74d7903449041f9d26ce955" gracePeriod=30
Nov 25 10:09:43 crc kubenswrapper[4769]: I1125 10:09:43.907706 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="738cf132-f225-43cb-a32d-b383bf9d4138" containerName="nova-api-log" containerID="cri-o://2fc2269802b6f448bb00cdcbb1e351d8ad8e6031f3cab54f99594cf9da7d7771" gracePeriod=30
Nov 25 10:09:43 crc kubenswrapper[4769]: I1125 10:09:43.908999 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158-config-data\") pod \"nova-cell1-cell-mapping-qh9h6\" (UID: \"28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158\") " pod="openstack/nova-cell1-cell-mapping-qh9h6"
Nov 25 10:09:43 crc kubenswrapper[4769]: I1125 10:09:43.909246 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-qh9h6\" (UID: \"28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158\") " pod="openstack/nova-cell1-cell-mapping-qh9h6"
started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-qh9h6\" (UID: \"28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158\") " pod="openstack/nova-cell1-cell-mapping-qh9h6" Nov 25 10:09:43 crc kubenswrapper[4769]: I1125 10:09:43.909282 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158-scripts\") pod \"nova-cell1-cell-mapping-qh9h6\" (UID: \"28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158\") " pod="openstack/nova-cell1-cell-mapping-qh9h6" Nov 25 10:09:43 crc kubenswrapper[4769]: I1125 10:09:43.909316 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j5gtr\" (UniqueName: \"kubernetes.io/projected/28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158-kube-api-access-j5gtr\") pod \"nova-cell1-cell-mapping-qh9h6\" (UID: \"28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158\") " pod="openstack/nova-cell1-cell-mapping-qh9h6" Nov 25 10:09:43 crc kubenswrapper[4769]: I1125 10:09:43.918951 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158-config-data\") pod \"nova-cell1-cell-mapping-qh9h6\" (UID: \"28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158\") " pod="openstack/nova-cell1-cell-mapping-qh9h6" Nov 25 10:09:43 crc kubenswrapper[4769]: I1125 10:09:43.921531 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-qh9h6\" (UID: \"28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158\") " pod="openstack/nova-cell1-cell-mapping-qh9h6" Nov 25 10:09:43 crc kubenswrapper[4769]: I1125 10:09:43.927581 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158-scripts\") pod \"nova-cell1-cell-mapping-qh9h6\" (UID: \"28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158\") " pod="openstack/nova-cell1-cell-mapping-qh9h6" Nov 25 10:09:43 crc kubenswrapper[4769]: I1125 10:09:43.962257 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j5gtr\" (UniqueName: \"kubernetes.io/projected/28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158-kube-api-access-j5gtr\") pod \"nova-cell1-cell-mapping-qh9h6\" (UID: \"28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158\") " pod="openstack/nova-cell1-cell-mapping-qh9h6" Nov 25 10:09:44 crc kubenswrapper[4769]: I1125 10:09:44.060454 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-qh9h6" Nov 25 10:09:44 crc kubenswrapper[4769]: I1125 10:09:44.068726 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b5d74c8c-bt68p" event={"ID":"d52e129a-837d-4574-aa08-69857eb9109f","Type":"ContainerStarted","Data":"3d78941dc9dee668e4013ce2a77c316e616f77de5def1c04d01d33d39d7290ab"} Nov 25 10:09:44 crc kubenswrapper[4769]: I1125 10:09:44.118349 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-79b5d74c8c-bt68p" podStartSLOduration=3.11832049 podStartE2EDuration="3.11832049s" podCreationTimestamp="2025-11-25 10:09:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:09:44.08751593 +0000 UTC m=+1532.672488243" watchObservedRunningTime="2025-11-25 10:09:44.11832049 +0000 UTC m=+1532.703292803" Nov 25 10:09:44 crc kubenswrapper[4769]: I1125 10:09:44.456945 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:09:44 crc kubenswrapper[4769]: I1125 10:09:44.457895 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9adc355f-c2d4-4184-9229-605ea979b390" containerName="ceilometer-central-agent" containerID="cri-o://c7d63e54994a1ff033095bbb8b871af10d02f583143a36c367b0e335ec6e60ec" gracePeriod=30 Nov 25 10:09:44 crc kubenswrapper[4769]: I1125 10:09:44.458003 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9adc355f-c2d4-4184-9229-605ea979b390" containerName="proxy-httpd" containerID="cri-o://050873cbdad1aea15009eec8f4f2cbbe3a4423d7a7c87f985c2bc7a0c096c0d6" gracePeriod=30 Nov 25 10:09:44 crc kubenswrapper[4769]: I1125 10:09:44.458040 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9adc355f-c2d4-4184-9229-605ea979b390" containerName="sg-core" containerID="cri-o://e0a1837b4686bc2a27481550ccdc9b55b2ee1a45f5e6118a49f32dc1670c8bda" gracePeriod=30 Nov 25 10:09:44 crc kubenswrapper[4769]: I1125 10:09:44.458066 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9adc355f-c2d4-4184-9229-605ea979b390" containerName="ceilometer-notification-agent" containerID="cri-o://6b296e2431a9e81d4f0a319547bf50978cc6864f0f03fdd4428f8cad0d4aefed" gracePeriod=30 Nov 25 10:09:44 crc kubenswrapper[4769]: I1125 10:09:44.564693 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="9adc355f-c2d4-4184-9229-605ea979b390" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.250:3000/\": read tcp 10.217.0.2:47126->10.217.0.250:3000: read: connection reset by peer" Nov 25 10:09:44 crc kubenswrapper[4769]: W1125 10:09:44.595766 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod28b6e605_9cd8_4ab4_ab4c_7f9aeb7c1158.slice/crio-0a0964eb5908b01082d139121090e19eae779b8d823f045faa5007cddacff3e2 WatchSource:0}: Error finding container 0a0964eb5908b01082d139121090e19eae779b8d823f045faa5007cddacff3e2: Status 404 returned error can't find the container with id 0a0964eb5908b01082d139121090e19eae779b8d823f045faa5007cddacff3e2 Nov 25 10:09:44 crc kubenswrapper[4769]: I1125 10:09:44.630348 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/nova-cell1-cell-mapping-qh9h6"] Nov 25 10:09:45 crc kubenswrapper[4769]: I1125 10:09:45.088286 4769 generic.go:334] "Generic (PLEG): container finished" podID="738cf132-f225-43cb-a32d-b383bf9d4138" containerID="2fc2269802b6f448bb00cdcbb1e351d8ad8e6031f3cab54f99594cf9da7d7771" exitCode=143 Nov 25 10:09:45 crc kubenswrapper[4769]: I1125 10:09:45.088389 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"738cf132-f225-43cb-a32d-b383bf9d4138","Type":"ContainerDied","Data":"2fc2269802b6f448bb00cdcbb1e351d8ad8e6031f3cab54f99594cf9da7d7771"} Nov 25 10:09:45 crc kubenswrapper[4769]: I1125 10:09:45.091658 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-qh9h6" event={"ID":"28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158","Type":"ContainerStarted","Data":"b50fe91ee9b296b81fe73d33f1dd9caa29a3905ac6a627ba3d332b6c85ace7f0"} Nov 25 10:09:45 crc kubenswrapper[4769]: I1125 10:09:45.091720 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-qh9h6" event={"ID":"28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158","Type":"ContainerStarted","Data":"0a0964eb5908b01082d139121090e19eae779b8d823f045faa5007cddacff3e2"} Nov 25 10:09:45 crc kubenswrapper[4769]: I1125 10:09:45.094085 4769 generic.go:334] "Generic (PLEG): container finished" podID="9adc355f-c2d4-4184-9229-605ea979b390" containerID="050873cbdad1aea15009eec8f4f2cbbe3a4423d7a7c87f985c2bc7a0c096c0d6" exitCode=0 Nov 25 10:09:45 crc kubenswrapper[4769]: I1125 10:09:45.094118 4769 generic.go:334] "Generic (PLEG): container finished" podID="9adc355f-c2d4-4184-9229-605ea979b390" containerID="e0a1837b4686bc2a27481550ccdc9b55b2ee1a45f5e6118a49f32dc1670c8bda" exitCode=2 Nov 25 10:09:45 crc kubenswrapper[4769]: I1125 10:09:45.094129 4769 generic.go:334] "Generic (PLEG): container finished" podID="9adc355f-c2d4-4184-9229-605ea979b390" containerID="c7d63e54994a1ff033095bbb8b871af10d02f583143a36c367b0e335ec6e60ec" exitCode=0 Nov 25 10:09:45 crc kubenswrapper[4769]: I1125 10:09:45.094285 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9adc355f-c2d4-4184-9229-605ea979b390","Type":"ContainerDied","Data":"050873cbdad1aea15009eec8f4f2cbbe3a4423d7a7c87f985c2bc7a0c096c0d6"} Nov 25 10:09:45 crc kubenswrapper[4769]: I1125 10:09:45.094320 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9adc355f-c2d4-4184-9229-605ea979b390","Type":"ContainerDied","Data":"e0a1837b4686bc2a27481550ccdc9b55b2ee1a45f5e6118a49f32dc1670c8bda"} Nov 25 10:09:45 crc kubenswrapper[4769]: I1125 10:09:45.094331 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9adc355f-c2d4-4184-9229-605ea979b390","Type":"ContainerDied","Data":"c7d63e54994a1ff033095bbb8b871af10d02f583143a36c367b0e335ec6e60ec"} Nov 25 10:09:45 crc kubenswrapper[4769]: I1125 10:09:45.094423 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-79b5d74c8c-bt68p" Nov 25 10:09:45 crc kubenswrapper[4769]: I1125 10:09:45.114467 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-qh9h6" podStartSLOduration=2.114445533 podStartE2EDuration="2.114445533s" podCreationTimestamp="2025-11-25 10:09:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:09:45.113650183 +0000 UTC m=+1533.698622496" 
watchObservedRunningTime="2025-11-25 10:09:45.114445533 +0000 UTC m=+1533.699417856" Nov 25 10:09:47 crc kubenswrapper[4769]: I1125 10:09:47.163173 4769 generic.go:334] "Generic (PLEG): container finished" podID="33335ac3-fa4e-4a44-89b7-71083bcf7930" containerID="75b5d69aa3c48413997bb55343b3fbd3f1aa389aed79c724735e83ddce28fcb3" exitCode=137 Nov 25 10:09:47 crc kubenswrapper[4769]: I1125 10:09:47.164747 4769 generic.go:334] "Generic (PLEG): container finished" podID="33335ac3-fa4e-4a44-89b7-71083bcf7930" containerID="ef2b07e27d68e43f372b0de88833c2acaaeee0ff597b28bd172ec305aafc588e" exitCode=137 Nov 25 10:09:47 crc kubenswrapper[4769]: I1125 10:09:47.163522 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"33335ac3-fa4e-4a44-89b7-71083bcf7930","Type":"ContainerDied","Data":"75b5d69aa3c48413997bb55343b3fbd3f1aa389aed79c724735e83ddce28fcb3"} Nov 25 10:09:47 crc kubenswrapper[4769]: I1125 10:09:47.164895 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"33335ac3-fa4e-4a44-89b7-71083bcf7930","Type":"ContainerDied","Data":"ef2b07e27d68e43f372b0de88833c2acaaeee0ff597b28bd172ec305aafc588e"} Nov 25 10:09:47 crc kubenswrapper[4769]: I1125 10:09:47.303051 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Nov 25 10:09:47 crc kubenswrapper[4769]: I1125 10:09:47.363090 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9r4v4\" (UniqueName: \"kubernetes.io/projected/33335ac3-fa4e-4a44-89b7-71083bcf7930-kube-api-access-9r4v4\") pod \"33335ac3-fa4e-4a44-89b7-71083bcf7930\" (UID: \"33335ac3-fa4e-4a44-89b7-71083bcf7930\") " Nov 25 10:09:47 crc kubenswrapper[4769]: I1125 10:09:47.363250 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/33335ac3-fa4e-4a44-89b7-71083bcf7930-scripts\") pod \"33335ac3-fa4e-4a44-89b7-71083bcf7930\" (UID: \"33335ac3-fa4e-4a44-89b7-71083bcf7930\") " Nov 25 10:09:47 crc kubenswrapper[4769]: I1125 10:09:47.363400 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33335ac3-fa4e-4a44-89b7-71083bcf7930-config-data\") pod \"33335ac3-fa4e-4a44-89b7-71083bcf7930\" (UID: \"33335ac3-fa4e-4a44-89b7-71083bcf7930\") " Nov 25 10:09:47 crc kubenswrapper[4769]: I1125 10:09:47.363430 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33335ac3-fa4e-4a44-89b7-71083bcf7930-combined-ca-bundle\") pod \"33335ac3-fa4e-4a44-89b7-71083bcf7930\" (UID: \"33335ac3-fa4e-4a44-89b7-71083bcf7930\") " Nov 25 10:09:47 crc kubenswrapper[4769]: I1125 10:09:47.392912 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33335ac3-fa4e-4a44-89b7-71083bcf7930-scripts" (OuterVolumeSpecName: "scripts") pod "33335ac3-fa4e-4a44-89b7-71083bcf7930" (UID: "33335ac3-fa4e-4a44-89b7-71083bcf7930"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:09:47 crc kubenswrapper[4769]: I1125 10:09:47.409756 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/33335ac3-fa4e-4a44-89b7-71083bcf7930-kube-api-access-9r4v4" (OuterVolumeSpecName: "kube-api-access-9r4v4") pod "33335ac3-fa4e-4a44-89b7-71083bcf7930" (UID: "33335ac3-fa4e-4a44-89b7-71083bcf7930"). 
InnerVolumeSpecName "kube-api-access-9r4v4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:09:47 crc kubenswrapper[4769]: I1125 10:09:47.465363 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="9adc355f-c2d4-4184-9229-605ea979b390" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.250:3000/\": dial tcp 10.217.0.250:3000: connect: connection refused" Nov 25 10:09:47 crc kubenswrapper[4769]: I1125 10:09:47.468067 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9r4v4\" (UniqueName: \"kubernetes.io/projected/33335ac3-fa4e-4a44-89b7-71083bcf7930-kube-api-access-9r4v4\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:47 crc kubenswrapper[4769]: I1125 10:09:47.468084 4769 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/33335ac3-fa4e-4a44-89b7-71083bcf7930-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:47 crc kubenswrapper[4769]: I1125 10:09:47.571351 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 10:09:47 crc kubenswrapper[4769]: I1125 10:09:47.640024 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33335ac3-fa4e-4a44-89b7-71083bcf7930-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "33335ac3-fa4e-4a44-89b7-71083bcf7930" (UID: "33335ac3-fa4e-4a44-89b7-71083bcf7930"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:09:47 crc kubenswrapper[4769]: I1125 10:09:47.644551 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33335ac3-fa4e-4a44-89b7-71083bcf7930-config-data" (OuterVolumeSpecName: "config-data") pod "33335ac3-fa4e-4a44-89b7-71083bcf7930" (UID: "33335ac3-fa4e-4a44-89b7-71083bcf7930"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:09:47 crc kubenswrapper[4769]: I1125 10:09:47.678056 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/738cf132-f225-43cb-a32d-b383bf9d4138-logs\") pod \"738cf132-f225-43cb-a32d-b383bf9d4138\" (UID: \"738cf132-f225-43cb-a32d-b383bf9d4138\") " Nov 25 10:09:47 crc kubenswrapper[4769]: I1125 10:09:47.678149 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/738cf132-f225-43cb-a32d-b383bf9d4138-config-data\") pod \"738cf132-f225-43cb-a32d-b383bf9d4138\" (UID: \"738cf132-f225-43cb-a32d-b383bf9d4138\") " Nov 25 10:09:47 crc kubenswrapper[4769]: I1125 10:09:47.678286 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/738cf132-f225-43cb-a32d-b383bf9d4138-combined-ca-bundle\") pod \"738cf132-f225-43cb-a32d-b383bf9d4138\" (UID: \"738cf132-f225-43cb-a32d-b383bf9d4138\") " Nov 25 10:09:47 crc kubenswrapper[4769]: I1125 10:09:47.678320 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sjtcv\" (UniqueName: \"kubernetes.io/projected/738cf132-f225-43cb-a32d-b383bf9d4138-kube-api-access-sjtcv\") pod \"738cf132-f225-43cb-a32d-b383bf9d4138\" (UID: \"738cf132-f225-43cb-a32d-b383bf9d4138\") " Nov 25 10:09:47 crc kubenswrapper[4769]: I1125 10:09:47.678775 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33335ac3-fa4e-4a44-89b7-71083bcf7930-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:47 crc kubenswrapper[4769]: I1125 10:09:47.678794 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33335ac3-fa4e-4a44-89b7-71083bcf7930-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:47 crc kubenswrapper[4769]: I1125 10:09:47.682730 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/738cf132-f225-43cb-a32d-b383bf9d4138-logs" (OuterVolumeSpecName: "logs") pod "738cf132-f225-43cb-a32d-b383bf9d4138" (UID: "738cf132-f225-43cb-a32d-b383bf9d4138"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:09:47 crc kubenswrapper[4769]: I1125 10:09:47.683059 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/738cf132-f225-43cb-a32d-b383bf9d4138-kube-api-access-sjtcv" (OuterVolumeSpecName: "kube-api-access-sjtcv") pod "738cf132-f225-43cb-a32d-b383bf9d4138" (UID: "738cf132-f225-43cb-a32d-b383bf9d4138"). InnerVolumeSpecName "kube-api-access-sjtcv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:09:47 crc kubenswrapper[4769]: I1125 10:09:47.716223 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/738cf132-f225-43cb-a32d-b383bf9d4138-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "738cf132-f225-43cb-a32d-b383bf9d4138" (UID: "738cf132-f225-43cb-a32d-b383bf9d4138"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:09:47 crc kubenswrapper[4769]: I1125 10:09:47.743593 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/738cf132-f225-43cb-a32d-b383bf9d4138-config-data" (OuterVolumeSpecName: "config-data") pod "738cf132-f225-43cb-a32d-b383bf9d4138" (UID: "738cf132-f225-43cb-a32d-b383bf9d4138"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:09:47 crc kubenswrapper[4769]: I1125 10:09:47.781772 4769 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/738cf132-f225-43cb-a32d-b383bf9d4138-logs\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:47 crc kubenswrapper[4769]: I1125 10:09:47.781922 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/738cf132-f225-43cb-a32d-b383bf9d4138-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:47 crc kubenswrapper[4769]: I1125 10:09:47.781940 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/738cf132-f225-43cb-a32d-b383bf9d4138-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:47 crc kubenswrapper[4769]: I1125 10:09:47.781978 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sjtcv\" (UniqueName: \"kubernetes.io/projected/738cf132-f225-43cb-a32d-b383bf9d4138-kube-api-access-sjtcv\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.172211 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.190562 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9adc355f-c2d4-4184-9229-605ea979b390-log-httpd\") pod \"9adc355f-c2d4-4184-9229-605ea979b390\" (UID: \"9adc355f-c2d4-4184-9229-605ea979b390\") " Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.190637 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9adc355f-c2d4-4184-9229-605ea979b390-config-data\") pod \"9adc355f-c2d4-4184-9229-605ea979b390\" (UID: \"9adc355f-c2d4-4184-9229-605ea979b390\") " Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.190916 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9adc355f-c2d4-4184-9229-605ea979b390-sg-core-conf-yaml\") pod \"9adc355f-c2d4-4184-9229-605ea979b390\" (UID: \"9adc355f-c2d4-4184-9229-605ea979b390\") " Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.191012 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9adc355f-c2d4-4184-9229-605ea979b390-run-httpd\") pod \"9adc355f-c2d4-4184-9229-605ea979b390\" (UID: \"9adc355f-c2d4-4184-9229-605ea979b390\") " Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.191103 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9adc355f-c2d4-4184-9229-605ea979b390-combined-ca-bundle\") pod \"9adc355f-c2d4-4184-9229-605ea979b390\" (UID: \"9adc355f-c2d4-4184-9229-605ea979b390\") " Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.191191 4769 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9adc355f-c2d4-4184-9229-605ea979b390-scripts\") pod \"9adc355f-c2d4-4184-9229-605ea979b390\" (UID: \"9adc355f-c2d4-4184-9229-605ea979b390\") " Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.191314 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jljlf\" (UniqueName: \"kubernetes.io/projected/9adc355f-c2d4-4184-9229-605ea979b390-kube-api-access-jljlf\") pod \"9adc355f-c2d4-4184-9229-605ea979b390\" (UID: \"9adc355f-c2d4-4184-9229-605ea979b390\") " Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.191699 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9adc355f-c2d4-4184-9229-605ea979b390-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "9adc355f-c2d4-4184-9229-605ea979b390" (UID: "9adc355f-c2d4-4184-9229-605ea979b390"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.191908 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9adc355f-c2d4-4184-9229-605ea979b390-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "9adc355f-c2d4-4184-9229-605ea979b390" (UID: "9adc355f-c2d4-4184-9229-605ea979b390"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.202348 4769 generic.go:334] "Generic (PLEG): container finished" podID="738cf132-f225-43cb-a32d-b383bf9d4138" containerID="d3cce1ed884724eeee4a4b668d4c1542fcaf7485d74d7903449041f9d26ce955" exitCode=0 Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.202481 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"738cf132-f225-43cb-a32d-b383bf9d4138","Type":"ContainerDied","Data":"d3cce1ed884724eeee4a4b668d4c1542fcaf7485d74d7903449041f9d26ce955"} Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.202524 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"738cf132-f225-43cb-a32d-b383bf9d4138","Type":"ContainerDied","Data":"fe5a52dcf6fc0e1420613c0dd6f5c78c75231b245b10ff5eeb0569547d8661e0"} Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.202526 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.202547 4769 scope.go:117] "RemoveContainer" containerID="d3cce1ed884724eeee4a4b668d4c1542fcaf7485d74d7903449041f9d26ce955" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.209599 4769 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9adc355f-c2d4-4184-9229-605ea979b390-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.209648 4769 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9adc355f-c2d4-4184-9229-605ea979b390-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.233905 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9adc355f-c2d4-4184-9229-605ea979b390-scripts" (OuterVolumeSpecName: "scripts") pod "9adc355f-c2d4-4184-9229-605ea979b390" (UID: "9adc355f-c2d4-4184-9229-605ea979b390"). 
InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.244413 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9adc355f-c2d4-4184-9229-605ea979b390-kube-api-access-jljlf" (OuterVolumeSpecName: "kube-api-access-jljlf") pod "9adc355f-c2d4-4184-9229-605ea979b390" (UID: "9adc355f-c2d4-4184-9229-605ea979b390"). InnerVolumeSpecName "kube-api-access-jljlf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.245364 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.248136 4769 generic.go:334] "Generic (PLEG): container finished" podID="9adc355f-c2d4-4184-9229-605ea979b390" containerID="6b296e2431a9e81d4f0a319547bf50978cc6864f0f03fdd4428f8cad0d4aefed" exitCode=0 Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.248257 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.256697 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9adc355f-c2d4-4184-9229-605ea979b390-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "9adc355f-c2d4-4184-9229-605ea979b390" (UID: "9adc355f-c2d4-4184-9229-605ea979b390"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.279834 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"33335ac3-fa4e-4a44-89b7-71083bcf7930","Type":"ContainerDied","Data":"d4d1c69d86123e49c9f408cc10dc0d8db15e112a622aeff35d69b31af9002784"} Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.279921 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9adc355f-c2d4-4184-9229-605ea979b390","Type":"ContainerDied","Data":"6b296e2431a9e81d4f0a319547bf50978cc6864f0f03fdd4428f8cad0d4aefed"} Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.279941 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9adc355f-c2d4-4184-9229-605ea979b390","Type":"ContainerDied","Data":"166a6f53753ea368f35d94fd7166dd678ba5837c670c9cb847bef3cfad560644"} Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.315770 4769 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9adc355f-c2d4-4184-9229-605ea979b390-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.315805 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jljlf\" (UniqueName: \"kubernetes.io/projected/9adc355f-c2d4-4184-9229-605ea979b390-kube-api-access-jljlf\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.315819 4769 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9adc355f-c2d4-4184-9229-605ea979b390-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.331897 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.347297 4769 scope.go:117] "RemoveContainer" 
containerID="2fc2269802b6f448bb00cdcbb1e351d8ad8e6031f3cab54f99594cf9da7d7771" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.396072 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.411419 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 25 10:09:48 crc kubenswrapper[4769]: E1125 10:09:48.412242 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9adc355f-c2d4-4184-9229-605ea979b390" containerName="sg-core" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.412263 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="9adc355f-c2d4-4184-9229-605ea979b390" containerName="sg-core" Nov 25 10:09:48 crc kubenswrapper[4769]: E1125 10:09:48.412281 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="738cf132-f225-43cb-a32d-b383bf9d4138" containerName="nova-api-api" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.412287 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="738cf132-f225-43cb-a32d-b383bf9d4138" containerName="nova-api-api" Nov 25 10:09:48 crc kubenswrapper[4769]: E1125 10:09:48.412297 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33335ac3-fa4e-4a44-89b7-71083bcf7930" containerName="aodh-notifier" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.412303 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="33335ac3-fa4e-4a44-89b7-71083bcf7930" containerName="aodh-notifier" Nov 25 10:09:48 crc kubenswrapper[4769]: E1125 10:09:48.412337 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33335ac3-fa4e-4a44-89b7-71083bcf7930" containerName="aodh-evaluator" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.412343 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="33335ac3-fa4e-4a44-89b7-71083bcf7930" containerName="aodh-evaluator" Nov 25 10:09:48 crc kubenswrapper[4769]: E1125 10:09:48.412356 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9adc355f-c2d4-4184-9229-605ea979b390" containerName="ceilometer-central-agent" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.412362 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="9adc355f-c2d4-4184-9229-605ea979b390" containerName="ceilometer-central-agent" Nov 25 10:09:48 crc kubenswrapper[4769]: E1125 10:09:48.412374 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33335ac3-fa4e-4a44-89b7-71083bcf7930" containerName="aodh-api" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.412380 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="33335ac3-fa4e-4a44-89b7-71083bcf7930" containerName="aodh-api" Nov 25 10:09:48 crc kubenswrapper[4769]: E1125 10:09:48.412396 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33335ac3-fa4e-4a44-89b7-71083bcf7930" containerName="aodh-listener" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.412401 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="33335ac3-fa4e-4a44-89b7-71083bcf7930" containerName="aodh-listener" Nov 25 10:09:48 crc kubenswrapper[4769]: E1125 10:09:48.412422 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9adc355f-c2d4-4184-9229-605ea979b390" containerName="ceilometer-notification-agent" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.412428 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="9adc355f-c2d4-4184-9229-605ea979b390" containerName="ceilometer-notification-agent" Nov 25 
10:09:48 crc kubenswrapper[4769]: E1125 10:09:48.412438 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="738cf132-f225-43cb-a32d-b383bf9d4138" containerName="nova-api-log" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.412444 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="738cf132-f225-43cb-a32d-b383bf9d4138" containerName="nova-api-log" Nov 25 10:09:48 crc kubenswrapper[4769]: E1125 10:09:48.412464 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9adc355f-c2d4-4184-9229-605ea979b390" containerName="proxy-httpd" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.412473 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="9adc355f-c2d4-4184-9229-605ea979b390" containerName="proxy-httpd" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.412764 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="33335ac3-fa4e-4a44-89b7-71083bcf7930" containerName="aodh-listener" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.412794 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="9adc355f-c2d4-4184-9229-605ea979b390" containerName="ceilometer-central-agent" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.412809 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="9adc355f-c2d4-4184-9229-605ea979b390" containerName="proxy-httpd" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.412820 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="33335ac3-fa4e-4a44-89b7-71083bcf7930" containerName="aodh-api" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.412833 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="738cf132-f225-43cb-a32d-b383bf9d4138" containerName="nova-api-log" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.412843 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="33335ac3-fa4e-4a44-89b7-71083bcf7930" containerName="aodh-notifier" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.412853 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="33335ac3-fa4e-4a44-89b7-71083bcf7930" containerName="aodh-evaluator" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.412865 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="738cf132-f225-43cb-a32d-b383bf9d4138" containerName="nova-api-api" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.412873 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="9adc355f-c2d4-4184-9229-605ea979b390" containerName="sg-core" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.412886 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="9adc355f-c2d4-4184-9229-605ea979b390" containerName="ceilometer-notification-agent" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.431807 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.433235 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.437797 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.440478 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9adc355f-c2d4-4184-9229-605ea979b390-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9adc355f-c2d4-4184-9229-605ea979b390" (UID: "9adc355f-c2d4-4184-9229-605ea979b390"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.444152 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.444440 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.444562 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.451208 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-0"] Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.453836 4769 scope.go:117] "RemoveContainer" containerID="d3cce1ed884724eeee4a4b668d4c1542fcaf7485d74d7903449041f9d26ce955" Nov 25 10:09:48 crc kubenswrapper[4769]: E1125 10:09:48.458228 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d3cce1ed884724eeee4a4b668d4c1542fcaf7485d74d7903449041f9d26ce955\": container with ID starting with d3cce1ed884724eeee4a4b668d4c1542fcaf7485d74d7903449041f9d26ce955 not found: ID does not exist" containerID="d3cce1ed884724eeee4a4b668d4c1542fcaf7485d74d7903449041f9d26ce955" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.458307 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d3cce1ed884724eeee4a4b668d4c1542fcaf7485d74d7903449041f9d26ce955"} err="failed to get container status \"d3cce1ed884724eeee4a4b668d4c1542fcaf7485d74d7903449041f9d26ce955\": rpc error: code = NotFound desc = could not find container \"d3cce1ed884724eeee4a4b668d4c1542fcaf7485d74d7903449041f9d26ce955\": container with ID starting with d3cce1ed884724eeee4a4b668d4c1542fcaf7485d74d7903449041f9d26ce955 not found: ID does not exist" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.458347 4769 scope.go:117] "RemoveContainer" containerID="2fc2269802b6f448bb00cdcbb1e351d8ad8e6031f3cab54f99594cf9da7d7771" Nov 25 10:09:48 crc kubenswrapper[4769]: E1125 10:09:48.459117 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2fc2269802b6f448bb00cdcbb1e351d8ad8e6031f3cab54f99594cf9da7d7771\": container with ID starting with 2fc2269802b6f448bb00cdcbb1e351d8ad8e6031f3cab54f99594cf9da7d7771 not found: ID does not exist" containerID="2fc2269802b6f448bb00cdcbb1e351d8ad8e6031f3cab54f99594cf9da7d7771" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.459173 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2fc2269802b6f448bb00cdcbb1e351d8ad8e6031f3cab54f99594cf9da7d7771"} err="failed to get container status \"2fc2269802b6f448bb00cdcbb1e351d8ad8e6031f3cab54f99594cf9da7d7771\": rpc error: code = NotFound desc = could not find 
container \"2fc2269802b6f448bb00cdcbb1e351d8ad8e6031f3cab54f99594cf9da7d7771\": container with ID starting with 2fc2269802b6f448bb00cdcbb1e351d8ad8e6031f3cab54f99594cf9da7d7771 not found: ID does not exist" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.459213 4769 scope.go:117] "RemoveContainer" containerID="75b5d69aa3c48413997bb55343b3fbd3f1aa389aed79c724735e83ddce28fcb3" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.466142 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.472155 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.477638 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-5nnz9" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.477793 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.477978 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-public-svc" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.477995 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.478123 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-internal-svc" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.483639 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.502248 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9adc355f-c2d4-4184-9229-605ea979b390-config-data" (OuterVolumeSpecName: "config-data") pod "9adc355f-c2d4-4184-9229-605ea979b390" (UID: "9adc355f-c2d4-4184-9229-605ea979b390"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.517268 4769 scope.go:117] "RemoveContainer" containerID="ef2b07e27d68e43f372b0de88833c2acaaeee0ff597b28bd172ec305aafc588e" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.522234 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9adc355f-c2d4-4184-9229-605ea979b390-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.522263 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9adc355f-c2d4-4184-9229-605ea979b390-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.561421 4769 scope.go:117] "RemoveContainer" containerID="09711f25fdaf8a7b325b3961e035985bbef939a9315d4c48e41b712279c2e69a" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.597748 4769 scope.go:117] "RemoveContainer" containerID="338a8423adc3403c6e5ee2124377a3e2a9e45462d9c3952d90d353ce929ed541" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.610750 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.624811 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-24vfz\" (UniqueName: \"kubernetes.io/projected/29183bb3-0d2f-4df7-9729-d2bdfab57b2c-kube-api-access-24vfz\") pod \"aodh-0\" (UID: \"29183bb3-0d2f-4df7-9729-d2bdfab57b2c\") " pod="openstack/aodh-0" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.624902 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6190d2cc-c021-420a-80e9-e66ce07fc13b-public-tls-certs\") pod \"nova-api-0\" (UID: \"6190d2cc-c021-420a-80e9-e66ce07fc13b\") " pod="openstack/nova-api-0" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.625229 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6190d2cc-c021-420a-80e9-e66ce07fc13b-config-data\") pod \"nova-api-0\" (UID: \"6190d2cc-c021-420a-80e9-e66ce07fc13b\") " pod="openstack/nova-api-0" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.625302 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/29183bb3-0d2f-4df7-9729-d2bdfab57b2c-internal-tls-certs\") pod \"aodh-0\" (UID: \"29183bb3-0d2f-4df7-9729-d2bdfab57b2c\") " pod="openstack/aodh-0" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.625412 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5xs57\" (UniqueName: \"kubernetes.io/projected/6190d2cc-c021-420a-80e9-e66ce07fc13b-kube-api-access-5xs57\") pod \"nova-api-0\" (UID: \"6190d2cc-c021-420a-80e9-e66ce07fc13b\") " pod="openstack/nova-api-0" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.625455 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29183bb3-0d2f-4df7-9729-d2bdfab57b2c-combined-ca-bundle\") pod \"aodh-0\" (UID: \"29183bb3-0d2f-4df7-9729-d2bdfab57b2c\") " pod="openstack/aodh-0" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 
10:09:48.625535 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/29183bb3-0d2f-4df7-9729-d2bdfab57b2c-public-tls-certs\") pod \"aodh-0\" (UID: \"29183bb3-0d2f-4df7-9729-d2bdfab57b2c\") " pod="openstack/aodh-0" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.625799 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6190d2cc-c021-420a-80e9-e66ce07fc13b-internal-tls-certs\") pod \"nova-api-0\" (UID: \"6190d2cc-c021-420a-80e9-e66ce07fc13b\") " pod="openstack/nova-api-0" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.626115 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29183bb3-0d2f-4df7-9729-d2bdfab57b2c-config-data\") pod \"aodh-0\" (UID: \"29183bb3-0d2f-4df7-9729-d2bdfab57b2c\") " pod="openstack/aodh-0" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.626226 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/29183bb3-0d2f-4df7-9729-d2bdfab57b2c-scripts\") pod \"aodh-0\" (UID: \"29183bb3-0d2f-4df7-9729-d2bdfab57b2c\") " pod="openstack/aodh-0" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.626262 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6190d2cc-c021-420a-80e9-e66ce07fc13b-logs\") pod \"nova-api-0\" (UID: \"6190d2cc-c021-420a-80e9-e66ce07fc13b\") " pod="openstack/nova-api-0" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.626414 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6190d2cc-c021-420a-80e9-e66ce07fc13b-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"6190d2cc-c021-420a-80e9-e66ce07fc13b\") " pod="openstack/nova-api-0" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.631001 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.634116 4769 scope.go:117] "RemoveContainer" containerID="050873cbdad1aea15009eec8f4f2cbbe3a4423d7a7c87f985c2bc7a0c096c0d6" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.659735 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.664375 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.667016 4769 scope.go:117] "RemoveContainer" containerID="e0a1837b4686bc2a27481550ccdc9b55b2ee1a45f5e6118a49f32dc1670c8bda" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.667286 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.667749 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.675280 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.729106 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6190d2cc-c021-420a-80e9-e66ce07fc13b-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"6190d2cc-c021-420a-80e9-e66ce07fc13b\") " pod="openstack/nova-api-0" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.729201 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-24vfz\" (UniqueName: \"kubernetes.io/projected/29183bb3-0d2f-4df7-9729-d2bdfab57b2c-kube-api-access-24vfz\") pod \"aodh-0\" (UID: \"29183bb3-0d2f-4df7-9729-d2bdfab57b2c\") " pod="openstack/aodh-0" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.729242 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6190d2cc-c021-420a-80e9-e66ce07fc13b-public-tls-certs\") pod \"nova-api-0\" (UID: \"6190d2cc-c021-420a-80e9-e66ce07fc13b\") " pod="openstack/nova-api-0" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.729294 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6190d2cc-c021-420a-80e9-e66ce07fc13b-config-data\") pod \"nova-api-0\" (UID: \"6190d2cc-c021-420a-80e9-e66ce07fc13b\") " pod="openstack/nova-api-0" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.729317 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/29183bb3-0d2f-4df7-9729-d2bdfab57b2c-internal-tls-certs\") pod \"aodh-0\" (UID: \"29183bb3-0d2f-4df7-9729-d2bdfab57b2c\") " pod="openstack/aodh-0" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.729356 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5xs57\" (UniqueName: \"kubernetes.io/projected/6190d2cc-c021-420a-80e9-e66ce07fc13b-kube-api-access-5xs57\") pod \"nova-api-0\" (UID: \"6190d2cc-c021-420a-80e9-e66ce07fc13b\") " pod="openstack/nova-api-0" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.729378 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29183bb3-0d2f-4df7-9729-d2bdfab57b2c-combined-ca-bundle\") pod \"aodh-0\" (UID: \"29183bb3-0d2f-4df7-9729-d2bdfab57b2c\") " pod="openstack/aodh-0" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.729407 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/29183bb3-0d2f-4df7-9729-d2bdfab57b2c-public-tls-certs\") pod \"aodh-0\" (UID: \"29183bb3-0d2f-4df7-9729-d2bdfab57b2c\") " pod="openstack/aodh-0" Nov 25 10:09:48 
crc kubenswrapper[4769]: I1125 10:09:48.729482 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6190d2cc-c021-420a-80e9-e66ce07fc13b-internal-tls-certs\") pod \"nova-api-0\" (UID: \"6190d2cc-c021-420a-80e9-e66ce07fc13b\") " pod="openstack/nova-api-0" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.729522 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29183bb3-0d2f-4df7-9729-d2bdfab57b2c-config-data\") pod \"aodh-0\" (UID: \"29183bb3-0d2f-4df7-9729-d2bdfab57b2c\") " pod="openstack/aodh-0" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.729549 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/29183bb3-0d2f-4df7-9729-d2bdfab57b2c-scripts\") pod \"aodh-0\" (UID: \"29183bb3-0d2f-4df7-9729-d2bdfab57b2c\") " pod="openstack/aodh-0" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.729569 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6190d2cc-c021-420a-80e9-e66ce07fc13b-logs\") pod \"nova-api-0\" (UID: \"6190d2cc-c021-420a-80e9-e66ce07fc13b\") " pod="openstack/nova-api-0" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.730047 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6190d2cc-c021-420a-80e9-e66ce07fc13b-logs\") pod \"nova-api-0\" (UID: \"6190d2cc-c021-420a-80e9-e66ce07fc13b\") " pod="openstack/nova-api-0" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.737121 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6190d2cc-c021-420a-80e9-e66ce07fc13b-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"6190d2cc-c021-420a-80e9-e66ce07fc13b\") " pod="openstack/nova-api-0" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.739088 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29183bb3-0d2f-4df7-9729-d2bdfab57b2c-config-data\") pod \"aodh-0\" (UID: \"29183bb3-0d2f-4df7-9729-d2bdfab57b2c\") " pod="openstack/aodh-0" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.739122 4769 scope.go:117] "RemoveContainer" containerID="6b296e2431a9e81d4f0a319547bf50978cc6864f0f03fdd4428f8cad0d4aefed" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.739672 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6190d2cc-c021-420a-80e9-e66ce07fc13b-internal-tls-certs\") pod \"nova-api-0\" (UID: \"6190d2cc-c021-420a-80e9-e66ce07fc13b\") " pod="openstack/nova-api-0" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.741932 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29183bb3-0d2f-4df7-9729-d2bdfab57b2c-combined-ca-bundle\") pod \"aodh-0\" (UID: \"29183bb3-0d2f-4df7-9729-d2bdfab57b2c\") " pod="openstack/aodh-0" Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.743204 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/29183bb3-0d2f-4df7-9729-d2bdfab57b2c-internal-tls-certs\") pod \"aodh-0\" (UID: \"29183bb3-0d2f-4df7-9729-d2bdfab57b2c\") " pod="openstack/aodh-0" 
Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.743678 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/29183bb3-0d2f-4df7-9729-d2bdfab57b2c-public-tls-certs\") pod \"aodh-0\" (UID: \"29183bb3-0d2f-4df7-9729-d2bdfab57b2c\") " pod="openstack/aodh-0"
Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.749402 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/29183bb3-0d2f-4df7-9729-d2bdfab57b2c-scripts\") pod \"aodh-0\" (UID: \"29183bb3-0d2f-4df7-9729-d2bdfab57b2c\") " pod="openstack/aodh-0"
Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.749869 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6190d2cc-c021-420a-80e9-e66ce07fc13b-config-data\") pod \"nova-api-0\" (UID: \"6190d2cc-c021-420a-80e9-e66ce07fc13b\") " pod="openstack/nova-api-0"
Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.749917 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6190d2cc-c021-420a-80e9-e66ce07fc13b-public-tls-certs\") pod \"nova-api-0\" (UID: \"6190d2cc-c021-420a-80e9-e66ce07fc13b\") " pod="openstack/nova-api-0"
Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.753404 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-24vfz\" (UniqueName: \"kubernetes.io/projected/29183bb3-0d2f-4df7-9729-d2bdfab57b2c-kube-api-access-24vfz\") pod \"aodh-0\" (UID: \"29183bb3-0d2f-4df7-9729-d2bdfab57b2c\") " pod="openstack/aodh-0"
Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.756807 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5xs57\" (UniqueName: \"kubernetes.io/projected/6190d2cc-c021-420a-80e9-e66ce07fc13b-kube-api-access-5xs57\") pod \"nova-api-0\" (UID: \"6190d2cc-c021-420a-80e9-e66ce07fc13b\") " pod="openstack/nova-api-0"
Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.784663 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.816403 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0"
Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.832225 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kckb2\" (UniqueName: \"kubernetes.io/projected/370b0510-6672-4acc-b393-069cf70f4443-kube-api-access-kckb2\") pod \"ceilometer-0\" (UID: \"370b0510-6672-4acc-b393-069cf70f4443\") " pod="openstack/ceilometer-0"
Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.832551 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/370b0510-6672-4acc-b393-069cf70f4443-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"370b0510-6672-4acc-b393-069cf70f4443\") " pod="openstack/ceilometer-0"
Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.832814 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/370b0510-6672-4acc-b393-069cf70f4443-log-httpd\") pod \"ceilometer-0\" (UID: \"370b0510-6672-4acc-b393-069cf70f4443\") " pod="openstack/ceilometer-0"
Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.832880 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/370b0510-6672-4acc-b393-069cf70f4443-scripts\") pod \"ceilometer-0\" (UID: \"370b0510-6672-4acc-b393-069cf70f4443\") " pod="openstack/ceilometer-0"
Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.833053 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/370b0510-6672-4acc-b393-069cf70f4443-run-httpd\") pod \"ceilometer-0\" (UID: \"370b0510-6672-4acc-b393-069cf70f4443\") " pod="openstack/ceilometer-0"
Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.833405 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/370b0510-6672-4acc-b393-069cf70f4443-config-data\") pod \"ceilometer-0\" (UID: \"370b0510-6672-4acc-b393-069cf70f4443\") " pod="openstack/ceilometer-0"
Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.833705 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/370b0510-6672-4acc-b393-069cf70f4443-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"370b0510-6672-4acc-b393-069cf70f4443\") " pod="openstack/ceilometer-0"
Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.937522 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kckb2\" (UniqueName: \"kubernetes.io/projected/370b0510-6672-4acc-b393-069cf70f4443-kube-api-access-kckb2\") pod \"ceilometer-0\" (UID: \"370b0510-6672-4acc-b393-069cf70f4443\") " pod="openstack/ceilometer-0"
Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.938263 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/370b0510-6672-4acc-b393-069cf70f4443-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"370b0510-6672-4acc-b393-069cf70f4443\") " pod="openstack/ceilometer-0"
Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.938391 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/370b0510-6672-4acc-b393-069cf70f4443-log-httpd\") pod \"ceilometer-0\" (UID: \"370b0510-6672-4acc-b393-069cf70f4443\") " pod="openstack/ceilometer-0"
Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.938473 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/370b0510-6672-4acc-b393-069cf70f4443-scripts\") pod \"ceilometer-0\" (UID: \"370b0510-6672-4acc-b393-069cf70f4443\") " pod="openstack/ceilometer-0"
Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.938558 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/370b0510-6672-4acc-b393-069cf70f4443-run-httpd\") pod \"ceilometer-0\" (UID: \"370b0510-6672-4acc-b393-069cf70f4443\") " pod="openstack/ceilometer-0"
Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.938753 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/370b0510-6672-4acc-b393-069cf70f4443-config-data\") pod \"ceilometer-0\" (UID: \"370b0510-6672-4acc-b393-069cf70f4443\") " pod="openstack/ceilometer-0"
Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.938915 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/370b0510-6672-4acc-b393-069cf70f4443-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"370b0510-6672-4acc-b393-069cf70f4443\") " pod="openstack/ceilometer-0"
Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.942693 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/370b0510-6672-4acc-b393-069cf70f4443-log-httpd\") pod \"ceilometer-0\" (UID: \"370b0510-6672-4acc-b393-069cf70f4443\") " pod="openstack/ceilometer-0"
Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.943005 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/370b0510-6672-4acc-b393-069cf70f4443-run-httpd\") pod \"ceilometer-0\" (UID: \"370b0510-6672-4acc-b393-069cf70f4443\") " pod="openstack/ceilometer-0"
Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.956839 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/370b0510-6672-4acc-b393-069cf70f4443-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"370b0510-6672-4acc-b393-069cf70f4443\") " pod="openstack/ceilometer-0"
Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.962767 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kckb2\" (UniqueName: \"kubernetes.io/projected/370b0510-6672-4acc-b393-069cf70f4443-kube-api-access-kckb2\") pod \"ceilometer-0\" (UID: \"370b0510-6672-4acc-b393-069cf70f4443\") " pod="openstack/ceilometer-0"
Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.969112 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/370b0510-6672-4acc-b393-069cf70f4443-scripts\") pod \"ceilometer-0\" (UID: \"370b0510-6672-4acc-b393-069cf70f4443\") " pod="openstack/ceilometer-0"
Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.982174 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/370b0510-6672-4acc-b393-069cf70f4443-config-data\") pod \"ceilometer-0\" (UID: \"370b0510-6672-4acc-b393-069cf70f4443\") " pod="openstack/ceilometer-0"
Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.983228 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/370b0510-6672-4acc-b393-069cf70f4443-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"370b0510-6672-4acc-b393-069cf70f4443\") " pod="openstack/ceilometer-0"
Nov 25 10:09:48 crc kubenswrapper[4769]: I1125 10:09:48.992889 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 10:09:49 crc kubenswrapper[4769]: I1125 10:09:49.194040 4769 scope.go:117] "RemoveContainer" containerID="c7d63e54994a1ff033095bbb8b871af10d02f583143a36c367b0e335ec6e60ec"
Nov 25 10:09:49 crc kubenswrapper[4769]: I1125 10:09:49.481430 4769 scope.go:117] "RemoveContainer" containerID="050873cbdad1aea15009eec8f4f2cbbe3a4423d7a7c87f985c2bc7a0c096c0d6"
Nov 25 10:09:49 crc kubenswrapper[4769]: E1125 10:09:49.485971 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"050873cbdad1aea15009eec8f4f2cbbe3a4423d7a7c87f985c2bc7a0c096c0d6\": container with ID starting with 050873cbdad1aea15009eec8f4f2cbbe3a4423d7a7c87f985c2bc7a0c096c0d6 not found: ID does not exist" containerID="050873cbdad1aea15009eec8f4f2cbbe3a4423d7a7c87f985c2bc7a0c096c0d6"
Nov 25 10:09:49 crc kubenswrapper[4769]: I1125 10:09:49.486031 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"050873cbdad1aea15009eec8f4f2cbbe3a4423d7a7c87f985c2bc7a0c096c0d6"} err="failed to get container status \"050873cbdad1aea15009eec8f4f2cbbe3a4423d7a7c87f985c2bc7a0c096c0d6\": rpc error: code = NotFound desc = could not find container \"050873cbdad1aea15009eec8f4f2cbbe3a4423d7a7c87f985c2bc7a0c096c0d6\": container with ID starting with 050873cbdad1aea15009eec8f4f2cbbe3a4423d7a7c87f985c2bc7a0c096c0d6 not found: ID does not exist"
Nov 25 10:09:49 crc kubenswrapper[4769]: I1125 10:09:49.486059 4769 scope.go:117] "RemoveContainer" containerID="e0a1837b4686bc2a27481550ccdc9b55b2ee1a45f5e6118a49f32dc1670c8bda"
Nov 25 10:09:49 crc kubenswrapper[4769]: E1125 10:09:49.488840 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e0a1837b4686bc2a27481550ccdc9b55b2ee1a45f5e6118a49f32dc1670c8bda\": container with ID starting with e0a1837b4686bc2a27481550ccdc9b55b2ee1a45f5e6118a49f32dc1670c8bda not found: ID does not exist" containerID="e0a1837b4686bc2a27481550ccdc9b55b2ee1a45f5e6118a49f32dc1670c8bda"
Nov 25 10:09:49 crc kubenswrapper[4769]: I1125 10:09:49.488872 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e0a1837b4686bc2a27481550ccdc9b55b2ee1a45f5e6118a49f32dc1670c8bda"} err="failed to get container status \"e0a1837b4686bc2a27481550ccdc9b55b2ee1a45f5e6118a49f32dc1670c8bda\": rpc error: code = NotFound desc = could not find container \"e0a1837b4686bc2a27481550ccdc9b55b2ee1a45f5e6118a49f32dc1670c8bda\": container with ID starting with e0a1837b4686bc2a27481550ccdc9b55b2ee1a45f5e6118a49f32dc1670c8bda not found: ID does not exist"
Nov 25 10:09:49 crc kubenswrapper[4769]: I1125 10:09:49.488893 4769 scope.go:117] "RemoveContainer" containerID="6b296e2431a9e81d4f0a319547bf50978cc6864f0f03fdd4428f8cad0d4aefed"
Nov 25 10:09:49 crc kubenswrapper[4769]: E1125 10:09:49.516884 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6b296e2431a9e81d4f0a319547bf50978cc6864f0f03fdd4428f8cad0d4aefed\": container with ID starting with 6b296e2431a9e81d4f0a319547bf50978cc6864f0f03fdd4428f8cad0d4aefed not found: ID does not exist" containerID="6b296e2431a9e81d4f0a319547bf50978cc6864f0f03fdd4428f8cad0d4aefed"
Nov 25 10:09:49 crc kubenswrapper[4769]: I1125 10:09:49.517331 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6b296e2431a9e81d4f0a319547bf50978cc6864f0f03fdd4428f8cad0d4aefed"} err="failed to get container status \"6b296e2431a9e81d4f0a319547bf50978cc6864f0f03fdd4428f8cad0d4aefed\": rpc error: code = NotFound desc = could not find container \"6b296e2431a9e81d4f0a319547bf50978cc6864f0f03fdd4428f8cad0d4aefed\": container with ID starting with 6b296e2431a9e81d4f0a319547bf50978cc6864f0f03fdd4428f8cad0d4aefed not found: ID does not exist"
Nov 25 10:09:49 crc kubenswrapper[4769]: I1125 10:09:49.517374 4769 scope.go:117] "RemoveContainer" containerID="c7d63e54994a1ff033095bbb8b871af10d02f583143a36c367b0e335ec6e60ec"
Nov 25 10:09:49 crc kubenswrapper[4769]: E1125 10:09:49.531098 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c7d63e54994a1ff033095bbb8b871af10d02f583143a36c367b0e335ec6e60ec\": container with ID starting with c7d63e54994a1ff033095bbb8b871af10d02f583143a36c367b0e335ec6e60ec not found: ID does not exist" containerID="c7d63e54994a1ff033095bbb8b871af10d02f583143a36c367b0e335ec6e60ec"
Nov 25 10:09:49 crc kubenswrapper[4769]: I1125 10:09:49.531152 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c7d63e54994a1ff033095bbb8b871af10d02f583143a36c367b0e335ec6e60ec"} err="failed to get container status \"c7d63e54994a1ff033095bbb8b871af10d02f583143a36c367b0e335ec6e60ec\": rpc error: code = NotFound desc = could not find container \"c7d63e54994a1ff033095bbb8b871af10d02f583143a36c367b0e335ec6e60ec\": container with ID starting with c7d63e54994a1ff033095bbb8b871af10d02f583143a36c367b0e335ec6e60ec not found: ID does not exist"
Nov 25 10:09:49 crc kubenswrapper[4769]: I1125 10:09:49.533542 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 25 10:09:49 crc kubenswrapper[4769]: I1125 10:09:49.830935 4769 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 25 10:09:49 crc kubenswrapper[4769]: I1125 10:09:49.832889 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"]
Nov 25 10:09:49 crc kubenswrapper[4769]: I1125 10:09:49.998924 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 10:09:50 crc kubenswrapper[4769]: I1125 10:09:50.260090 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="33335ac3-fa4e-4a44-89b7-71083bcf7930" path="/var/lib/kubelet/pods/33335ac3-fa4e-4a44-89b7-71083bcf7930/volumes"
Nov 25 10:09:50 crc kubenswrapper[4769]: I1125 10:09:50.261943 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="738cf132-f225-43cb-a32d-b383bf9d4138" path="/var/lib/kubelet/pods/738cf132-f225-43cb-a32d-b383bf9d4138/volumes"
Nov 25 10:09:50 crc kubenswrapper[4769]: I1125 10:09:50.262895 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9adc355f-c2d4-4184-9229-605ea979b390" path="/var/lib/kubelet/pods/9adc355f-c2d4-4184-9229-605ea979b390/volumes"
Nov 25 10:09:50 crc kubenswrapper[4769]: I1125 10:09:50.414751 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"370b0510-6672-4acc-b393-069cf70f4443","Type":"ContainerStarted","Data":"0501c3e438c32c0ceed429c7c01881f029dbab877905be0d64d3c938561ecb41"}
Nov 25 10:09:50 crc kubenswrapper[4769]: I1125 10:09:50.418144 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"6190d2cc-c021-420a-80e9-e66ce07fc13b","Type":"ContainerStarted","Data":"07282414d162cad8bb927529d3f6232c788ffaf3df0ef28cdc251c47820f64c4"}
Nov 25 10:09:50 crc kubenswrapper[4769]: I1125 10:09:50.418215 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"6190d2cc-c021-420a-80e9-e66ce07fc13b","Type":"ContainerStarted","Data":"96b0d05399a561373871463045b78542f6901565931a53fbf1c8fab6e03a4f31"}
Nov 25 10:09:50 crc kubenswrapper[4769]: I1125 10:09:50.418248 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"6190d2cc-c021-420a-80e9-e66ce07fc13b","Type":"ContainerStarted","Data":"3cce779bd0e971ed7cc557616804d05ddbf5ad97e3dff08f78bb42b1a4d7dae8"}
Nov 25 10:09:50 crc kubenswrapper[4769]: I1125 10:09:50.419170 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"29183bb3-0d2f-4df7-9729-d2bdfab57b2c","Type":"ContainerStarted","Data":"efa0463c3c062b22589c15e2f0fcfcb5f8b4a977bd1b45f6b814ce40a12094cb"}
Nov 25 10:09:50 crc kubenswrapper[4769]: I1125 10:09:50.453414 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.453397528 podStartE2EDuration="2.453397528s" podCreationTimestamp="2025-11-25 10:09:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:09:50.449828506 +0000 UTC m=+1539.034800819" watchObservedRunningTime="2025-11-25 10:09:50.453397528 +0000 UTC m=+1539.038369841"
Nov 25 10:09:51 crc kubenswrapper[4769]: I1125 10:09:51.434396 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"370b0510-6672-4acc-b393-069cf70f4443","Type":"ContainerStarted","Data":"04d7ddd5ed4ce5857962927dbc6c403c9f2cc92190604db4beccd0e2f0dc7da4"}
Nov 25 10:09:51 crc kubenswrapper[4769]: I1125 10:09:51.434795 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"370b0510-6672-4acc-b393-069cf70f4443","Type":"ContainerStarted","Data":"7a3df91621e92af79b7382c95d50a6bfaad4ce55d25017498212b04efc37aeb6"}
Nov 25 10:09:51 crc kubenswrapper[4769]: I1125 10:09:51.438064 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"29183bb3-0d2f-4df7-9729-d2bdfab57b2c","Type":"ContainerStarted","Data":"959f3ad6d6b0d13571597c4277a2ebb0622005e5176fc5d857747007f1c19506"}
Nov 25 10:09:51 crc kubenswrapper[4769]: I1125 10:09:51.438108 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"29183bb3-0d2f-4df7-9729-d2bdfab57b2c","Type":"ContainerStarted","Data":"29eae928ea173d70ad89a06b7fb8ad34eca62e515265faff808ffcb27ec1f050"}
Nov 25 10:09:51 crc kubenswrapper[4769]: I1125 10:09:51.846305 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-79b5d74c8c-bt68p"
Nov 25 10:09:51 crc kubenswrapper[4769]: I1125 10:09:51.964397 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-tgxvk"]
Nov 25 10:09:51 crc kubenswrapper[4769]: I1125 10:09:51.964642 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5fbc4d444f-tgxvk" podUID="82684575-8aff-4214-bd2a-20289cc4446a" containerName="dnsmasq-dns" containerID="cri-o://513bcc0d36d6dace1da1845551ef157a03d1162376bd7f6d0ac36277a659431b" gracePeriod=10
Nov 25 10:09:52 crc kubenswrapper[4769]: I1125 10:09:52.291460 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 10:09:52 crc kubenswrapper[4769]: I1125 10:09:52.292057 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 10:09:52 crc kubenswrapper[4769]: I1125 10:09:52.513753 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"29183bb3-0d2f-4df7-9729-d2bdfab57b2c","Type":"ContainerStarted","Data":"c98ff24127309bebad1aa4b10d2660d067b86d3217f6a95b3b75bd9fea7beacc"}
Nov 25 10:09:52 crc kubenswrapper[4769]: I1125 10:09:52.532226 4769 generic.go:334] "Generic (PLEG): container finished" podID="82684575-8aff-4214-bd2a-20289cc4446a" containerID="513bcc0d36d6dace1da1845551ef157a03d1162376bd7f6d0ac36277a659431b" exitCode=0
Nov 25 10:09:52 crc kubenswrapper[4769]: I1125 10:09:52.532306 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fbc4d444f-tgxvk" event={"ID":"82684575-8aff-4214-bd2a-20289cc4446a","Type":"ContainerDied","Data":"513bcc0d36d6dace1da1845551ef157a03d1162376bd7f6d0ac36277a659431b"}
Nov 25 10:09:52 crc kubenswrapper[4769]: I1125 10:09:52.533589 4769 generic.go:334] "Generic (PLEG): container finished" podID="28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158" containerID="b50fe91ee9b296b81fe73d33f1dd9caa29a3905ac6a627ba3d332b6c85ace7f0" exitCode=0
Nov 25 10:09:52 crc kubenswrapper[4769]: I1125 10:09:52.533654 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-qh9h6" event={"ID":"28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158","Type":"ContainerDied","Data":"b50fe91ee9b296b81fe73d33f1dd9caa29a3905ac6a627ba3d332b6c85ace7f0"}
Nov 25 10:09:52 crc kubenswrapper[4769]: I1125 10:09:52.550537 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"370b0510-6672-4acc-b393-069cf70f4443","Type":"ContainerStarted","Data":"ca4de0460af9d900786a83fa7a27a15416fbbc0104ed9a67f032bf6e577b57f4"}
Nov 25 10:09:52 crc kubenswrapper[4769]: I1125 10:09:52.619079 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5fbc4d444f-tgxvk"
Nov 25 10:09:52 crc kubenswrapper[4769]: I1125 10:09:52.803627 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/82684575-8aff-4214-bd2a-20289cc4446a-ovsdbserver-nb\") pod \"82684575-8aff-4214-bd2a-20289cc4446a\" (UID: \"82684575-8aff-4214-bd2a-20289cc4446a\") "
Nov 25 10:09:52 crc kubenswrapper[4769]: I1125 10:09:52.804890 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/82684575-8aff-4214-bd2a-20289cc4446a-ovsdbserver-sb\") pod \"82684575-8aff-4214-bd2a-20289cc4446a\" (UID: \"82684575-8aff-4214-bd2a-20289cc4446a\") "
Nov 25 10:09:52 crc kubenswrapper[4769]: I1125 10:09:52.805025 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/82684575-8aff-4214-bd2a-20289cc4446a-dns-svc\") pod \"82684575-8aff-4214-bd2a-20289cc4446a\" (UID: \"82684575-8aff-4214-bd2a-20289cc4446a\") "
Nov 25 10:09:52 crc kubenswrapper[4769]: I1125 10:09:52.805168 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/82684575-8aff-4214-bd2a-20289cc4446a-dns-swift-storage-0\") pod \"82684575-8aff-4214-bd2a-20289cc4446a\" (UID: \"82684575-8aff-4214-bd2a-20289cc4446a\") "
Nov 25 10:09:52 crc kubenswrapper[4769]: I1125 10:09:52.805245 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82684575-8aff-4214-bd2a-20289cc4446a-config\") pod \"82684575-8aff-4214-bd2a-20289cc4446a\" (UID: \"82684575-8aff-4214-bd2a-20289cc4446a\") "
Nov 25 10:09:52 crc kubenswrapper[4769]: I1125 10:09:52.805328 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lxlht\" (UniqueName: \"kubernetes.io/projected/82684575-8aff-4214-bd2a-20289cc4446a-kube-api-access-lxlht\") pod \"82684575-8aff-4214-bd2a-20289cc4446a\" (UID: \"82684575-8aff-4214-bd2a-20289cc4446a\") "
Nov 25 10:09:52 crc kubenswrapper[4769]: I1125 10:09:52.815913 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/82684575-8aff-4214-bd2a-20289cc4446a-kube-api-access-lxlht" (OuterVolumeSpecName: "kube-api-access-lxlht") pod "82684575-8aff-4214-bd2a-20289cc4446a" (UID: "82684575-8aff-4214-bd2a-20289cc4446a"). InnerVolumeSpecName "kube-api-access-lxlht". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:09:52 crc kubenswrapper[4769]: I1125 10:09:52.878801 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/82684575-8aff-4214-bd2a-20289cc4446a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "82684575-8aff-4214-bd2a-20289cc4446a" (UID: "82684575-8aff-4214-bd2a-20289cc4446a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:09:52 crc kubenswrapper[4769]: I1125 10:09:52.883772 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/82684575-8aff-4214-bd2a-20289cc4446a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "82684575-8aff-4214-bd2a-20289cc4446a" (UID: "82684575-8aff-4214-bd2a-20289cc4446a"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:09:52 crc kubenswrapper[4769]: I1125 10:09:52.885571 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/82684575-8aff-4214-bd2a-20289cc4446a-config" (OuterVolumeSpecName: "config") pod "82684575-8aff-4214-bd2a-20289cc4446a" (UID: "82684575-8aff-4214-bd2a-20289cc4446a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:09:52 crc kubenswrapper[4769]: I1125 10:09:52.906880 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/82684575-8aff-4214-bd2a-20289cc4446a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "82684575-8aff-4214-bd2a-20289cc4446a" (UID: "82684575-8aff-4214-bd2a-20289cc4446a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:09:52 crc kubenswrapper[4769]: I1125 10:09:52.910514 4769 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/82684575-8aff-4214-bd2a-20289cc4446a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 25 10:09:52 crc kubenswrapper[4769]: I1125 10:09:52.910550 4769 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/82684575-8aff-4214-bd2a-20289cc4446a-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 25 10:09:52 crc kubenswrapper[4769]: I1125 10:09:52.910563 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82684575-8aff-4214-bd2a-20289cc4446a-config\") on node \"crc\" DevicePath \"\""
Nov 25 10:09:52 crc kubenswrapper[4769]: I1125 10:09:52.910575 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lxlht\" (UniqueName: \"kubernetes.io/projected/82684575-8aff-4214-bd2a-20289cc4446a-kube-api-access-lxlht\") on node \"crc\" DevicePath \"\""
Nov 25 10:09:52 crc kubenswrapper[4769]: I1125 10:09:52.910609 4769 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/82684575-8aff-4214-bd2a-20289cc4446a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 25 10:09:52 crc kubenswrapper[4769]: I1125 10:09:52.932529 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/82684575-8aff-4214-bd2a-20289cc4446a-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "82684575-8aff-4214-bd2a-20289cc4446a" (UID: "82684575-8aff-4214-bd2a-20289cc4446a"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:09:53 crc kubenswrapper[4769]: I1125 10:09:53.013572 4769 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/82684575-8aff-4214-bd2a-20289cc4446a-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Nov 25 10:09:53 crc kubenswrapper[4769]: I1125 10:09:53.565416 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5fbc4d444f-tgxvk"
Nov 25 10:09:53 crc kubenswrapper[4769]: I1125 10:09:53.565697 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fbc4d444f-tgxvk" event={"ID":"82684575-8aff-4214-bd2a-20289cc4446a","Type":"ContainerDied","Data":"d66c7b2fba4ba8d4c480f8a9490318a45822b5ad7d9b40ebb131e366c162b20c"}
Nov 25 10:09:53 crc kubenswrapper[4769]: I1125 10:09:53.565752 4769 scope.go:117] "RemoveContainer" containerID="513bcc0d36d6dace1da1845551ef157a03d1162376bd7f6d0ac36277a659431b"
Nov 25 10:09:53 crc kubenswrapper[4769]: I1125 10:09:53.570626 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"29183bb3-0d2f-4df7-9729-d2bdfab57b2c","Type":"ContainerStarted","Data":"5fb4214a2ed4194827bb171987628770bfa552b3f7d1647cc72d207a804189d8"}
Nov 25 10:09:53 crc kubenswrapper[4769]: I1125 10:09:53.603365 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=2.376760523 podStartE2EDuration="5.60334822s" podCreationTimestamp="2025-11-25 10:09:48 +0000 UTC" firstStartedPulling="2025-11-25 10:09:49.830570707 +0000 UTC m=+1538.415543020" lastFinishedPulling="2025-11-25 10:09:53.057158404 +0000 UTC m=+1541.642130717" observedRunningTime="2025-11-25 10:09:53.601907213 +0000 UTC m=+1542.186879546" watchObservedRunningTime="2025-11-25 10:09:53.60334822 +0000 UTC m=+1542.188320533"
Nov 25 10:09:53 crc kubenswrapper[4769]: I1125 10:09:53.626043 4769 scope.go:117] "RemoveContainer" containerID="b2732f1b5e7d004b02f39149542974a71c63cc17b214af80fb4e4b75ab1b9654"
Nov 25 10:09:53 crc kubenswrapper[4769]: I1125 10:09:53.682187 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-tgxvk"]
Nov 25 10:09:53 crc kubenswrapper[4769]: I1125 10:09:53.700732 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5fbc4d444f-tgxvk"]
Nov 25 10:09:54 crc kubenswrapper[4769]: I1125 10:09:54.180991 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-qh9h6"
Nov 25 10:09:54 crc kubenswrapper[4769]: I1125 10:09:54.251157 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="82684575-8aff-4214-bd2a-20289cc4446a" path="/var/lib/kubelet/pods/82684575-8aff-4214-bd2a-20289cc4446a/volumes"
Nov 25 10:09:54 crc kubenswrapper[4769]: I1125 10:09:54.347035 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158-combined-ca-bundle\") pod \"28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158\" (UID: \"28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158\") "
Nov 25 10:09:54 crc kubenswrapper[4769]: I1125 10:09:54.347134 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j5gtr\" (UniqueName: \"kubernetes.io/projected/28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158-kube-api-access-j5gtr\") pod \"28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158\" (UID: \"28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158\") "
Nov 25 10:09:54 crc kubenswrapper[4769]: I1125 10:09:54.347282 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158-scripts\") pod \"28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158\" (UID: \"28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158\") "
Nov 25 10:09:54 crc kubenswrapper[4769]: I1125 10:09:54.347458 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158-config-data\") pod \"28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158\" (UID: \"28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158\") "
Nov 25 10:09:54 crc kubenswrapper[4769]: I1125 10:09:54.354940 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158-kube-api-access-j5gtr" (OuterVolumeSpecName: "kube-api-access-j5gtr") pod "28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158" (UID: "28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158"). InnerVolumeSpecName "kube-api-access-j5gtr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:09:54 crc kubenswrapper[4769]: I1125 10:09:54.355106 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158-scripts" (OuterVolumeSpecName: "scripts") pod "28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158" (UID: "28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:09:54 crc kubenswrapper[4769]: I1125 10:09:54.394463 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158" (UID: "28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:09:54 crc kubenswrapper[4769]: I1125 10:09:54.398301 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158-config-data" (OuterVolumeSpecName: "config-data") pod "28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158" (UID: "28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:09:54 crc kubenswrapper[4769]: I1125 10:09:54.453096 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 10:09:54 crc kubenswrapper[4769]: I1125 10:09:54.453571 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 10:09:54 crc kubenswrapper[4769]: I1125 10:09:54.453585 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j5gtr\" (UniqueName: \"kubernetes.io/projected/28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158-kube-api-access-j5gtr\") on node \"crc\" DevicePath \"\""
Nov 25 10:09:54 crc kubenswrapper[4769]: I1125 10:09:54.453595 4769 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 10:09:54 crc kubenswrapper[4769]: I1125 10:09:54.584447 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-qh9h6"
Nov 25 10:09:54 crc kubenswrapper[4769]: I1125 10:09:54.584466 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-qh9h6" event={"ID":"28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158","Type":"ContainerDied","Data":"0a0964eb5908b01082d139121090e19eae779b8d823f045faa5007cddacff3e2"}
Nov 25 10:09:54 crc kubenswrapper[4769]: I1125 10:09:54.585851 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0a0964eb5908b01082d139121090e19eae779b8d823f045faa5007cddacff3e2"
Nov 25 10:09:54 crc kubenswrapper[4769]: I1125 10:09:54.589415 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"370b0510-6672-4acc-b393-069cf70f4443","Type":"ContainerStarted","Data":"9de712960d357704cf284adea90f9e8be114bf6d46a25d85f5fb25be992292f7"}
Nov 25 10:09:54 crc kubenswrapper[4769]: I1125 10:09:54.589497 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 25 10:09:54 crc kubenswrapper[4769]: I1125 10:09:54.623953 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.709553015 podStartE2EDuration="6.62393331s" podCreationTimestamp="2025-11-25 10:09:48 +0000 UTC" firstStartedPulling="2025-11-25 10:09:50.003428149 +0000 UTC m=+1538.588400462" lastFinishedPulling="2025-11-25 10:09:53.917808444 +0000 UTC m=+1542.502780757" observedRunningTime="2025-11-25 10:09:54.614674482 +0000 UTC m=+1543.199646795" watchObservedRunningTime="2025-11-25 10:09:54.62393331 +0000 UTC m=+1543.208905623"
Nov 25 10:09:54 crc kubenswrapper[4769]: I1125 10:09:54.705631 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 25 10:09:54 crc kubenswrapper[4769]: I1125 10:09:54.705982 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="6190d2cc-c021-420a-80e9-e66ce07fc13b" containerName="nova-api-log" containerID="cri-o://96b0d05399a561373871463045b78542f6901565931a53fbf1c8fab6e03a4f31" gracePeriod=30
Nov 25 10:09:54 crc kubenswrapper[4769]: I1125 10:09:54.706771 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="6190d2cc-c021-420a-80e9-e66ce07fc13b" containerName="nova-api-api" containerID="cri-o://07282414d162cad8bb927529d3f6232c788ffaf3df0ef28cdc251c47820f64c4" gracePeriod=30
Nov 25 10:09:54 crc kubenswrapper[4769]: I1125 10:09:54.768398 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 25 10:09:54 crc kubenswrapper[4769]: I1125 10:09:54.768646 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="74f2d1fa-6292-4290-9ad8-798f3c41bf97" containerName="nova-scheduler-scheduler" containerID="cri-o://8d927469f2ced3c9a1b43bb1f882271590dbbfc3d602246ab1dd43aa843627a1" gracePeriod=30
Nov 25 10:09:54 crc kubenswrapper[4769]: I1125 10:09:54.808450 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Nov 25 10:09:54 crc kubenswrapper[4769]: I1125 10:09:54.808722 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="44a42f95-baf9-4949-a54e-bc3eadadc774" containerName="nova-metadata-log" containerID="cri-o://cb46e19254772a8cd0409a33a4f7288ff20128a4c094d2efde9e70ace0eb4c42" gracePeriod=30
Nov 25 10:09:54 crc kubenswrapper[4769]: I1125 10:09:54.809216 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="44a42f95-baf9-4949-a54e-bc3eadadc774" containerName="nova-metadata-metadata" containerID="cri-o://9c6397bfe52edc42bffc88bb2a1b032782b7622c93584743e8c614f4c7d819b3" gracePeriod=30
Nov 25 10:09:54 crc kubenswrapper[4769]: I1125 10:09:54.818937 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/nova-metadata-0" podUID="44a42f95-baf9-4949-a54e-bc3eadadc774" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.248:8775/\": read tcp 10.217.0.2:33112->10.217.0.248:8775: read: connection reset by peer"
Nov 25 10:09:54 crc kubenswrapper[4769]: I1125 10:09:54.819027 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="44a42f95-baf9-4949-a54e-bc3eadadc774" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.248:8775/\": EOF"
Nov 25 10:09:55 crc kubenswrapper[4769]: I1125 10:09:55.605908 4769 generic.go:334] "Generic (PLEG): container finished" podID="6190d2cc-c021-420a-80e9-e66ce07fc13b" containerID="07282414d162cad8bb927529d3f6232c788ffaf3df0ef28cdc251c47820f64c4" exitCode=0
Nov 25 10:09:55 crc kubenswrapper[4769]: I1125 10:09:55.606451 4769 generic.go:334] "Generic (PLEG): container finished" podID="6190d2cc-c021-420a-80e9-e66ce07fc13b" containerID="96b0d05399a561373871463045b78542f6901565931a53fbf1c8fab6e03a4f31" exitCode=143
Nov 25 10:09:55 crc kubenswrapper[4769]: I1125 10:09:55.606505 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"6190d2cc-c021-420a-80e9-e66ce07fc13b","Type":"ContainerDied","Data":"07282414d162cad8bb927529d3f6232c788ffaf3df0ef28cdc251c47820f64c4"}
Nov 25 10:09:55 crc kubenswrapper[4769]: I1125 10:09:55.606532 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"6190d2cc-c021-420a-80e9-e66ce07fc13b","Type":"ContainerDied","Data":"96b0d05399a561373871463045b78542f6901565931a53fbf1c8fab6e03a4f31"}
Nov 25 10:09:55 crc kubenswrapper[4769]: I1125 10:09:55.609208 4769 generic.go:334] "Generic (PLEG): container finished" podID="44a42f95-baf9-4949-a54e-bc3eadadc774" containerID="cb46e19254772a8cd0409a33a4f7288ff20128a4c094d2efde9e70ace0eb4c42" exitCode=143
Nov 25 10:09:55 crc kubenswrapper[4769]: I1125 10:09:55.609254 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"44a42f95-baf9-4949-a54e-bc3eadadc774","Type":"ContainerDied","Data":"cb46e19254772a8cd0409a33a4f7288ff20128a4c094d2efde9e70ace0eb4c42"}
Nov 25 10:09:55 crc kubenswrapper[4769]: E1125 10:09:55.781500 4769 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8d927469f2ced3c9a1b43bb1f882271590dbbfc3d602246ab1dd43aa843627a1" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Nov 25 10:09:55 crc kubenswrapper[4769]: E1125 10:09:55.791386 4769 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8d927469f2ced3c9a1b43bb1f882271590dbbfc3d602246ab1dd43aa843627a1" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Nov 25 10:09:55 crc kubenswrapper[4769]: E1125 10:09:55.802362 4769 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8d927469f2ced3c9a1b43bb1f882271590dbbfc3d602246ab1dd43aa843627a1" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Nov 25 10:09:55 crc kubenswrapper[4769]: E1125 10:09:55.802451 4769 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="74f2d1fa-6292-4290-9ad8-798f3c41bf97" containerName="nova-scheduler-scheduler"
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.041729 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.122701 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6190d2cc-c021-420a-80e9-e66ce07fc13b-internal-tls-certs\") pod \"6190d2cc-c021-420a-80e9-e66ce07fc13b\" (UID: \"6190d2cc-c021-420a-80e9-e66ce07fc13b\") "
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.122899 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6190d2cc-c021-420a-80e9-e66ce07fc13b-config-data\") pod \"6190d2cc-c021-420a-80e9-e66ce07fc13b\" (UID: \"6190d2cc-c021-420a-80e9-e66ce07fc13b\") "
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.123059 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6190d2cc-c021-420a-80e9-e66ce07fc13b-public-tls-certs\") pod \"6190d2cc-c021-420a-80e9-e66ce07fc13b\" (UID: \"6190d2cc-c021-420a-80e9-e66ce07fc13b\") "
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.123237 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6190d2cc-c021-420a-80e9-e66ce07fc13b-combined-ca-bundle\") pod \"6190d2cc-c021-420a-80e9-e66ce07fc13b\" (UID: \"6190d2cc-c021-420a-80e9-e66ce07fc13b\") "
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.123306 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6190d2cc-c021-420a-80e9-e66ce07fc13b-logs\") pod \"6190d2cc-c021-420a-80e9-e66ce07fc13b\" (UID: \"6190d2cc-c021-420a-80e9-e66ce07fc13b\") "
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.123334 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5xs57\" (UniqueName: \"kubernetes.io/projected/6190d2cc-c021-420a-80e9-e66ce07fc13b-kube-api-access-5xs57\") pod \"6190d2cc-c021-420a-80e9-e66ce07fc13b\" (UID: \"6190d2cc-c021-420a-80e9-e66ce07fc13b\") "
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.125799 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6190d2cc-c021-420a-80e9-e66ce07fc13b-logs" (OuterVolumeSpecName: "logs") pod "6190d2cc-c021-420a-80e9-e66ce07fc13b" (UID: "6190d2cc-c021-420a-80e9-e66ce07fc13b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.131251 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6190d2cc-c021-420a-80e9-e66ce07fc13b-kube-api-access-5xs57" (OuterVolumeSpecName: "kube-api-access-5xs57") pod "6190d2cc-c021-420a-80e9-e66ce07fc13b" (UID: "6190d2cc-c021-420a-80e9-e66ce07fc13b"). InnerVolumeSpecName "kube-api-access-5xs57". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.167264 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6190d2cc-c021-420a-80e9-e66ce07fc13b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6190d2cc-c021-420a-80e9-e66ce07fc13b" (UID: "6190d2cc-c021-420a-80e9-e66ce07fc13b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.167699 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6190d2cc-c021-420a-80e9-e66ce07fc13b-config-data" (OuterVolumeSpecName: "config-data") pod "6190d2cc-c021-420a-80e9-e66ce07fc13b" (UID: "6190d2cc-c021-420a-80e9-e66ce07fc13b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.215647 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6190d2cc-c021-420a-80e9-e66ce07fc13b-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "6190d2cc-c021-420a-80e9-e66ce07fc13b" (UID: "6190d2cc-c021-420a-80e9-e66ce07fc13b"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.227473 4769 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6190d2cc-c021-420a-80e9-e66ce07fc13b-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.227510 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6190d2cc-c021-420a-80e9-e66ce07fc13b-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.227520 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6190d2cc-c021-420a-80e9-e66ce07fc13b-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.227531 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5xs57\" (UniqueName: \"kubernetes.io/projected/6190d2cc-c021-420a-80e9-e66ce07fc13b-kube-api-access-5xs57\") on node \"crc\" DevicePath \"\""
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.227543 4769 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6190d2cc-c021-420a-80e9-e66ce07fc13b-logs\") on node \"crc\" DevicePath \"\""
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.269711 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6190d2cc-c021-420a-80e9-e66ce07fc13b-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "6190d2cc-c021-420a-80e9-e66ce07fc13b" (UID: "6190d2cc-c021-420a-80e9-e66ce07fc13b"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.329480 4769 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6190d2cc-c021-420a-80e9-e66ce07fc13b-public-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 25 10:09:56 crc kubenswrapper[4769]: E1125 10:09:56.508244 4769 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6190d2cc_c021_420a_80e9_e66ce07fc13b.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6190d2cc_c021_420a_80e9_e66ce07fc13b.slice/crio-3cce779bd0e971ed7cc557616804d05ddbf5ad97e3dff08f78bb42b1a4d7dae8\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod74f2d1fa_6292_4290_9ad8_798f3c41bf97.slice/crio-conmon-8d927469f2ced3c9a1b43bb1f882271590dbbfc3d602246ab1dd43aa843627a1.scope\": RecentStats: unable to find data in memory cache]"
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.621030 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"6190d2cc-c021-420a-80e9-e66ce07fc13b","Type":"ContainerDied","Data":"3cce779bd0e971ed7cc557616804d05ddbf5ad97e3dff08f78bb42b1a4d7dae8"}
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.621118 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.621474 4769 scope.go:117] "RemoveContainer" containerID="07282414d162cad8bb927529d3f6232c788ffaf3df0ef28cdc251c47820f64c4"
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.624552 4769 generic.go:334] "Generic (PLEG): container finished" podID="74f2d1fa-6292-4290-9ad8-798f3c41bf97" containerID="8d927469f2ced3c9a1b43bb1f882271590dbbfc3d602246ab1dd43aa843627a1" exitCode=0
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.624610 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"74f2d1fa-6292-4290-9ad8-798f3c41bf97","Type":"ContainerDied","Data":"8d927469f2ced3c9a1b43bb1f882271590dbbfc3d602246ab1dd43aa843627a1"}
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.701678 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.706344 4769 scope.go:117] "RemoveContainer" containerID="96b0d05399a561373871463045b78542f6901565931a53fbf1c8fab6e03a4f31"
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.719913 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.733036 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Nov 25 10:09:56 crc kubenswrapper[4769]: E1125 10:09:56.733639 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6190d2cc-c021-420a-80e9-e66ce07fc13b" containerName="nova-api-log"
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.733663 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="6190d2cc-c021-420a-80e9-e66ce07fc13b" containerName="nova-api-log"
Nov 25 10:09:56 crc kubenswrapper[4769]: E1125 10:09:56.733690 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82684575-8aff-4214-bd2a-20289cc4446a" containerName="init"
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.733697 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="82684575-8aff-4214-bd2a-20289cc4446a" containerName="init"
Nov 25 10:09:56 crc kubenswrapper[4769]: E1125 10:09:56.733723 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158" containerName="nova-manage"
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.733729 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158" containerName="nova-manage"
Nov 25 10:09:56 crc kubenswrapper[4769]: E1125 10:09:56.733740 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6190d2cc-c021-420a-80e9-e66ce07fc13b" containerName="nova-api-api"
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.733748 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="6190d2cc-c021-420a-80e9-e66ce07fc13b" containerName="nova-api-api"
Nov 25 10:09:56 crc kubenswrapper[4769]: E1125 10:09:56.733766 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82684575-8aff-4214-bd2a-20289cc4446a" containerName="dnsmasq-dns"
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.733772 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="82684575-8aff-4214-bd2a-20289cc4446a" containerName="dnsmasq-dns"
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.734049 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158" containerName="nova-manage"
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.734064 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="82684575-8aff-4214-bd2a-20289cc4446a" containerName="dnsmasq-dns"
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.734072 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="6190d2cc-c021-420a-80e9-e66ce07fc13b" containerName="nova-api-log"
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.734086 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="6190d2cc-c021-420a-80e9-e66ce07fc13b" containerName="nova-api-api"
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.735375 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.741746 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc"
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.741934 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc"
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.742176 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.760097 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.845105 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a4d6dd4-fef8-443c-9266-9641e672100e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"3a4d6dd4-fef8-443c-9266-9641e672100e\") " pod="openstack/nova-api-0"
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.845235 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3a4d6dd4-fef8-443c-9266-9641e672100e-logs\") pod \"nova-api-0\" (UID: \"3a4d6dd4-fef8-443c-9266-9641e672100e\") " pod="openstack/nova-api-0"
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.845336 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3a4d6dd4-fef8-443c-9266-9641e672100e-internal-tls-certs\") pod \"nova-api-0\" (UID: \"3a4d6dd4-fef8-443c-9266-9641e672100e\") " pod="openstack/nova-api-0"
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.845375 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3a4d6dd4-fef8-443c-9266-9641e672100e-public-tls-certs\") pod \"nova-api-0\" (UID: \"3a4d6dd4-fef8-443c-9266-9641e672100e\") " pod="openstack/nova-api-0"
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.845407 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a4d6dd4-fef8-443c-9266-9641e672100e-config-data\") pod \"nova-api-0\" (UID: \"3a4d6dd4-fef8-443c-9266-9641e672100e\") " pod="openstack/nova-api-0"
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.845444 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-46rct\" (UniqueName: \"kubernetes.io/projected/3a4d6dd4-fef8-443c-9266-9641e672100e-kube-api-access-46rct\") pod \"nova-api-0\" (UID: \"3a4d6dd4-fef8-443c-9266-9641e672100e\") " pod="openstack/nova-api-0"
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.948147 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3a4d6dd4-fef8-443c-9266-9641e672100e-internal-tls-certs\") pod \"nova-api-0\" (UID: \"3a4d6dd4-fef8-443c-9266-9641e672100e\") " pod="openstack/nova-api-0"
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.948214 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3a4d6dd4-fef8-443c-9266-9641e672100e-public-tls-certs\") pod \"nova-api-0\" (UID: \"3a4d6dd4-fef8-443c-9266-9641e672100e\") " pod="openstack/nova-api-0"
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.948260 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a4d6dd4-fef8-443c-9266-9641e672100e-config-data\") pod \"nova-api-0\" (UID: \"3a4d6dd4-fef8-443c-9266-9641e672100e\") " pod="openstack/nova-api-0"
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.948308 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-46rct\" (UniqueName: \"kubernetes.io/projected/3a4d6dd4-fef8-443c-9266-9641e672100e-kube-api-access-46rct\") pod \"nova-api-0\" (UID: \"3a4d6dd4-fef8-443c-9266-9641e672100e\") " pod="openstack/nova-api-0"
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.949335 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a4d6dd4-fef8-443c-9266-9641e672100e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"3a4d6dd4-fef8-443c-9266-9641e672100e\") " pod="openstack/nova-api-0"
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.949429 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3a4d6dd4-fef8-443c-9266-9641e672100e-logs\") pod \"nova-api-0\" (UID: \"3a4d6dd4-fef8-443c-9266-9641e672100e\") " pod="openstack/nova-api-0"
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.950393 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3a4d6dd4-fef8-443c-9266-9641e672100e-logs\") pod \"nova-api-0\" (UID: \"3a4d6dd4-fef8-443c-9266-9641e672100e\") " pod="openstack/nova-api-0"
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.956654 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3a4d6dd4-fef8-443c-9266-9641e672100e-internal-tls-certs\") pod \"nova-api-0\" (UID: \"3a4d6dd4-fef8-443c-9266-9641e672100e\") " pod="openstack/nova-api-0"
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.956694 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3a4d6dd4-fef8-443c-9266-9641e672100e-public-tls-certs\") pod \"nova-api-0\" (UID: \"3a4d6dd4-fef8-443c-9266-9641e672100e\") " pod="openstack/nova-api-0"
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.957275 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a4d6dd4-fef8-443c-9266-9641e672100e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"3a4d6dd4-fef8-443c-9266-9641e672100e\") " pod="openstack/nova-api-0"
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.962876 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a4d6dd4-fef8-443c-9266-9641e672100e-config-data\") pod \"nova-api-0\" (UID: \"3a4d6dd4-fef8-443c-9266-9641e672100e\") " pod="openstack/nova-api-0"
Nov 25 10:09:56 crc kubenswrapper[4769]: I1125 10:09:56.982607 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-46rct\" (UniqueName: \"kubernetes.io/projected/3a4d6dd4-fef8-443c-9266-9641e672100e-kube-api-access-46rct\") pod \"nova-api-0\" (UID: \"3a4d6dd4-fef8-443c-9266-9641e672100e\") " pod="openstack/nova-api-0"
Nov 25 10:09:57 crc kubenswrapper[4769]: I1125 10:09:57.074721 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 25 10:09:57 crc kubenswrapper[4769]: I1125 10:09:57.106746 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 25 10:09:57 crc kubenswrapper[4769]: I1125 10:09:57.258160 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-95t42\" (UniqueName: \"kubernetes.io/projected/74f2d1fa-6292-4290-9ad8-798f3c41bf97-kube-api-access-95t42\") pod \"74f2d1fa-6292-4290-9ad8-798f3c41bf97\" (UID: \"74f2d1fa-6292-4290-9ad8-798f3c41bf97\") "
Nov 25 10:09:57 crc kubenswrapper[4769]: I1125 10:09:57.258356 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74f2d1fa-6292-4290-9ad8-798f3c41bf97-combined-ca-bundle\") pod \"74f2d1fa-6292-4290-9ad8-798f3c41bf97\" (UID: \"74f2d1fa-6292-4290-9ad8-798f3c41bf97\") "
Nov 25 10:09:57 crc kubenswrapper[4769]: I1125 10:09:57.258455 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74f2d1fa-6292-4290-9ad8-798f3c41bf97-config-data\") pod \"74f2d1fa-6292-4290-9ad8-798f3c41bf97\" (UID: \"74f2d1fa-6292-4290-9ad8-798f3c41bf97\") "
Nov 25 10:09:57 crc kubenswrapper[4769]: I1125 10:09:57.263737 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/74f2d1fa-6292-4290-9ad8-798f3c41bf97-kube-api-access-95t42" (OuterVolumeSpecName: "kube-api-access-95t42") pod "74f2d1fa-6292-4290-9ad8-798f3c41bf97" (UID: "74f2d1fa-6292-4290-9ad8-798f3c41bf97"). InnerVolumeSpecName "kube-api-access-95t42". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:09:57 crc kubenswrapper[4769]: I1125 10:09:57.296089 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74f2d1fa-6292-4290-9ad8-798f3c41bf97-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "74f2d1fa-6292-4290-9ad8-798f3c41bf97" (UID: "74f2d1fa-6292-4290-9ad8-798f3c41bf97"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:09:57 crc kubenswrapper[4769]: I1125 10:09:57.309470 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74f2d1fa-6292-4290-9ad8-798f3c41bf97-config-data" (OuterVolumeSpecName: "config-data") pod "74f2d1fa-6292-4290-9ad8-798f3c41bf97" (UID: "74f2d1fa-6292-4290-9ad8-798f3c41bf97"). InnerVolumeSpecName "config-data".
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:09:57 crc kubenswrapper[4769]: I1125 10:09:57.366157 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-95t42\" (UniqueName: \"kubernetes.io/projected/74f2d1fa-6292-4290-9ad8-798f3c41bf97-kube-api-access-95t42\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:57 crc kubenswrapper[4769]: I1125 10:09:57.366201 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74f2d1fa-6292-4290-9ad8-798f3c41bf97-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:57 crc kubenswrapper[4769]: I1125 10:09:57.366216 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74f2d1fa-6292-4290-9ad8-798f3c41bf97-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:09:57 crc kubenswrapper[4769]: I1125 10:09:57.646027 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"74f2d1fa-6292-4290-9ad8-798f3c41bf97","Type":"ContainerDied","Data":"fd16c50f0cfb156afabfa54fbae19ce0d9e037e0c211f5fc70d6f7e7fa051a11"} Nov 25 10:09:57 crc kubenswrapper[4769]: I1125 10:09:57.646554 4769 scope.go:117] "RemoveContainer" containerID="8d927469f2ced3c9a1b43bb1f882271590dbbfc3d602246ab1dd43aa843627a1" Nov 25 10:09:57 crc kubenswrapper[4769]: I1125 10:09:57.646082 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 10:09:57 crc kubenswrapper[4769]: W1125 10:09:57.707974 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3a4d6dd4_fef8_443c_9266_9641e672100e.slice/crio-a394b7ac3012ce25ca1f69a01084b82ac8a2c3cc8402ba0176c567b8085ebebd WatchSource:0}: Error finding container a394b7ac3012ce25ca1f69a01084b82ac8a2c3cc8402ba0176c567b8085ebebd: Status 404 returned error can't find the container with id a394b7ac3012ce25ca1f69a01084b82ac8a2c3cc8402ba0176c567b8085ebebd Nov 25 10:09:57 crc kubenswrapper[4769]: I1125 10:09:57.708874 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 10:09:57 crc kubenswrapper[4769]: I1125 10:09:57.720717 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 10:09:57 crc kubenswrapper[4769]: I1125 10:09:57.756325 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 10:09:57 crc kubenswrapper[4769]: I1125 10:09:57.783030 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 10:09:57 crc kubenswrapper[4769]: E1125 10:09:57.784542 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74f2d1fa-6292-4290-9ad8-798f3c41bf97" containerName="nova-scheduler-scheduler" Nov 25 10:09:57 crc kubenswrapper[4769]: I1125 10:09:57.784575 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="74f2d1fa-6292-4290-9ad8-798f3c41bf97" containerName="nova-scheduler-scheduler" Nov 25 10:09:57 crc kubenswrapper[4769]: I1125 10:09:57.785452 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="74f2d1fa-6292-4290-9ad8-798f3c41bf97" containerName="nova-scheduler-scheduler" Nov 25 10:09:57 crc kubenswrapper[4769]: I1125 10:09:57.789454 4769 util.go:30] "No sandbox for pod can be found. 
Nov 25 10:09:57 crc kubenswrapper[4769]: I1125 10:09:57.789454 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 25 10:09:57 crc kubenswrapper[4769]: I1125 10:09:57.792763 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Nov 25 10:09:57 crc kubenswrapper[4769]: I1125 10:09:57.818105 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 25 10:09:57 crc kubenswrapper[4769]: I1125 10:09:57.889400 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/25cfece4-fd70-4c52-88c9-f29be558844f-config-data\") pod \"nova-scheduler-0\" (UID: \"25cfece4-fd70-4c52-88c9-f29be558844f\") " pod="openstack/nova-scheduler-0"
Nov 25 10:09:57 crc kubenswrapper[4769]: I1125 10:09:57.889595 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bplx2\" (UniqueName: \"kubernetes.io/projected/25cfece4-fd70-4c52-88c9-f29be558844f-kube-api-access-bplx2\") pod \"nova-scheduler-0\" (UID: \"25cfece4-fd70-4c52-88c9-f29be558844f\") " pod="openstack/nova-scheduler-0"
Nov 25 10:09:57 crc kubenswrapper[4769]: I1125 10:09:57.889717 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25cfece4-fd70-4c52-88c9-f29be558844f-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"25cfece4-fd70-4c52-88c9-f29be558844f\") " pod="openstack/nova-scheduler-0"
Nov 25 10:09:57 crc kubenswrapper[4769]: I1125 10:09:57.993273 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25cfece4-fd70-4c52-88c9-f29be558844f-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"25cfece4-fd70-4c52-88c9-f29be558844f\") " pod="openstack/nova-scheduler-0"
Nov 25 10:09:57 crc kubenswrapper[4769]: I1125 10:09:57.993439 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/25cfece4-fd70-4c52-88c9-f29be558844f-config-data\") pod \"nova-scheduler-0\" (UID: \"25cfece4-fd70-4c52-88c9-f29be558844f\") " pod="openstack/nova-scheduler-0"
Nov 25 10:09:57 crc kubenswrapper[4769]: I1125 10:09:57.993508 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bplx2\" (UniqueName: \"kubernetes.io/projected/25cfece4-fd70-4c52-88c9-f29be558844f-kube-api-access-bplx2\") pod \"nova-scheduler-0\" (UID: \"25cfece4-fd70-4c52-88c9-f29be558844f\") " pod="openstack/nova-scheduler-0"
Nov 25 10:09:58 crc kubenswrapper[4769]: I1125 10:09:58.001520 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25cfece4-fd70-4c52-88c9-f29be558844f-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"25cfece4-fd70-4c52-88c9-f29be558844f\") " pod="openstack/nova-scheduler-0"
Nov 25 10:09:58 crc kubenswrapper[4769]: I1125 10:09:58.002611 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/25cfece4-fd70-4c52-88c9-f29be558844f-config-data\") pod \"nova-scheduler-0\" (UID: \"25cfece4-fd70-4c52-88c9-f29be558844f\") " pod="openstack/nova-scheduler-0"
Nov 25 10:09:58 crc kubenswrapper[4769]: I1125 10:09:58.013824 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bplx2\" (UniqueName: \"kubernetes.io/projected/25cfece4-fd70-4c52-88c9-f29be558844f-kube-api-access-bplx2\") pod \"nova-scheduler-0\" (UID: \"25cfece4-fd70-4c52-88c9-f29be558844f\") " pod="openstack/nova-scheduler-0"
Nov 25 10:09:58 crc kubenswrapper[4769]: I1125 10:09:58.147149 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 25 10:09:58 crc kubenswrapper[4769]: I1125 10:09:58.270760 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6190d2cc-c021-420a-80e9-e66ce07fc13b" path="/var/lib/kubelet/pods/6190d2cc-c021-420a-80e9-e66ce07fc13b/volumes"
Nov 25 10:09:58 crc kubenswrapper[4769]: I1125 10:09:58.271564 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="74f2d1fa-6292-4290-9ad8-798f3c41bf97" path="/var/lib/kubelet/pods/74f2d1fa-6292-4290-9ad8-798f3c41bf97/volumes"
Nov 25 10:09:58 crc kubenswrapper[4769]: I1125 10:09:58.674405 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 25 10:09:58 crc kubenswrapper[4769]: I1125 10:09:58.684123 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3a4d6dd4-fef8-443c-9266-9641e672100e","Type":"ContainerStarted","Data":"9290bac56541730c9a8a2061e307b022466d470fde85ebb8c931791bbd94bdba"}
Nov 25 10:09:58 crc kubenswrapper[4769]: I1125 10:09:58.684181 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3a4d6dd4-fef8-443c-9266-9641e672100e","Type":"ContainerStarted","Data":"ed7d021920c8022410c212444aa37eb1bf80dfeacfdc2a13f90aee6ecc4c0716"}
Nov 25 10:09:58 crc kubenswrapper[4769]: I1125 10:09:58.684196 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3a4d6dd4-fef8-443c-9266-9641e672100e","Type":"ContainerStarted","Data":"a394b7ac3012ce25ca1f69a01084b82ac8a2c3cc8402ba0176c567b8085ebebd"}
Nov 25 10:09:58 crc kubenswrapper[4769]: I1125 10:09:58.699273 4769 generic.go:334] "Generic (PLEG): container finished" podID="44a42f95-baf9-4949-a54e-bc3eadadc774" containerID="9c6397bfe52edc42bffc88bb2a1b032782b7622c93584743e8c614f4c7d819b3" exitCode=0
Nov 25 10:09:58 crc kubenswrapper[4769]: I1125 10:09:58.699327 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"44a42f95-baf9-4949-a54e-bc3eadadc774","Type":"ContainerDied","Data":"9c6397bfe52edc42bffc88bb2a1b032782b7622c93584743e8c614f4c7d819b3"}
Nov 25 10:09:58 crc kubenswrapper[4769]: I1125 10:09:58.726830 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.726806998 podStartE2EDuration="2.726806998s" podCreationTimestamp="2025-11-25 10:09:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:09:58.716455942 +0000 UTC m=+1547.301428255" watchObservedRunningTime="2025-11-25 10:09:58.726806998 +0000 UTC m=+1547.311779311"
Nov 25 10:09:58 crc kubenswrapper[4769]: I1125 10:09:58.776918 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 25 10:09:58 crc kubenswrapper[4769]: I1125 10:09:58.921120 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2v68z\" (UniqueName: \"kubernetes.io/projected/44a42f95-baf9-4949-a54e-bc3eadadc774-kube-api-access-2v68z\") pod \"44a42f95-baf9-4949-a54e-bc3eadadc774\" (UID: \"44a42f95-baf9-4949-a54e-bc3eadadc774\") "
Nov 25 10:09:58 crc kubenswrapper[4769]: I1125 10:09:58.921612 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/44a42f95-baf9-4949-a54e-bc3eadadc774-logs\") pod \"44a42f95-baf9-4949-a54e-bc3eadadc774\" (UID: \"44a42f95-baf9-4949-a54e-bc3eadadc774\") "
Nov 25 10:09:58 crc kubenswrapper[4769]: I1125 10:09:58.921681 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/44a42f95-baf9-4949-a54e-bc3eadadc774-nova-metadata-tls-certs\") pod \"44a42f95-baf9-4949-a54e-bc3eadadc774\" (UID: \"44a42f95-baf9-4949-a54e-bc3eadadc774\") "
Nov 25 10:09:58 crc kubenswrapper[4769]: I1125 10:09:58.921849 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44a42f95-baf9-4949-a54e-bc3eadadc774-config-data\") pod \"44a42f95-baf9-4949-a54e-bc3eadadc774\" (UID: \"44a42f95-baf9-4949-a54e-bc3eadadc774\") "
Nov 25 10:09:58 crc kubenswrapper[4769]: I1125 10:09:58.922165 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/44a42f95-baf9-4949-a54e-bc3eadadc774-logs" (OuterVolumeSpecName: "logs") pod "44a42f95-baf9-4949-a54e-bc3eadadc774" (UID: "44a42f95-baf9-4949-a54e-bc3eadadc774"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 10:09:58 crc kubenswrapper[4769]: I1125 10:09:58.922361 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44a42f95-baf9-4949-a54e-bc3eadadc774-combined-ca-bundle\") pod \"44a42f95-baf9-4949-a54e-bc3eadadc774\" (UID: \"44a42f95-baf9-4949-a54e-bc3eadadc774\") "
Nov 25 10:09:58 crc kubenswrapper[4769]: I1125 10:09:58.923347 4769 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/44a42f95-baf9-4949-a54e-bc3eadadc774-logs\") on node \"crc\" DevicePath \"\""
Nov 25 10:09:58 crc kubenswrapper[4769]: I1125 10:09:58.935519 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44a42f95-baf9-4949-a54e-bc3eadadc774-kube-api-access-2v68z" (OuterVolumeSpecName: "kube-api-access-2v68z") pod "44a42f95-baf9-4949-a54e-bc3eadadc774" (UID: "44a42f95-baf9-4949-a54e-bc3eadadc774"). InnerVolumeSpecName "kube-api-access-2v68z". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:09:58 crc kubenswrapper[4769]: I1125 10:09:58.973662 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44a42f95-baf9-4949-a54e-bc3eadadc774-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "44a42f95-baf9-4949-a54e-bc3eadadc774" (UID: "44a42f95-baf9-4949-a54e-bc3eadadc774"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:09:58 crc kubenswrapper[4769]: I1125 10:09:58.982186 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44a42f95-baf9-4949-a54e-bc3eadadc774-config-data" (OuterVolumeSpecName: "config-data") pod "44a42f95-baf9-4949-a54e-bc3eadadc774" (UID: "44a42f95-baf9-4949-a54e-bc3eadadc774"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:09:59 crc kubenswrapper[4769]: I1125 10:09:59.011198 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44a42f95-baf9-4949-a54e-bc3eadadc774-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "44a42f95-baf9-4949-a54e-bc3eadadc774" (UID: "44a42f95-baf9-4949-a54e-bc3eadadc774"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:09:59 crc kubenswrapper[4769]: I1125 10:09:59.026410 4769 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/44a42f95-baf9-4949-a54e-bc3eadadc774-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 25 10:09:59 crc kubenswrapper[4769]: I1125 10:09:59.026446 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44a42f95-baf9-4949-a54e-bc3eadadc774-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 10:09:59 crc kubenswrapper[4769]: I1125 10:09:59.026461 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44a42f95-baf9-4949-a54e-bc3eadadc774-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 10:09:59 crc kubenswrapper[4769]: I1125 10:09:59.026472 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2v68z\" (UniqueName: \"kubernetes.io/projected/44a42f95-baf9-4949-a54e-bc3eadadc774-kube-api-access-2v68z\") on node \"crc\" DevicePath \"\""
Nov 25 10:09:59 crc kubenswrapper[4769]: I1125 10:09:59.729521 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"25cfece4-fd70-4c52-88c9-f29be558844f","Type":"ContainerStarted","Data":"e7cc963350d8cbb45aa18ddf1f5a4924b555d417ad1ef828cd590b31dd50bb86"}
Nov 25 10:09:59 crc kubenswrapper[4769]: I1125 10:09:59.729934 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"25cfece4-fd70-4c52-88c9-f29be558844f","Type":"ContainerStarted","Data":"a54d0dc1d9ed08b130a09a8d1ee8719ab39bbc911685a4e4d7822a312bef5196"}
Nov 25 10:09:59 crc kubenswrapper[4769]: I1125 10:09:59.738620 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 25 10:09:59 crc kubenswrapper[4769]: I1125 10:09:59.738621 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"44a42f95-baf9-4949-a54e-bc3eadadc774","Type":"ContainerDied","Data":"966ac7d0e7e68e1320e6ef2313fc0465c6a1b9f7a0ae6dae13fab930dca6fe92"}
Nov 25 10:09:59 crc kubenswrapper[4769]: I1125 10:09:59.738811 4769 scope.go:117] "RemoveContainer" containerID="9c6397bfe52edc42bffc88bb2a1b032782b7622c93584743e8c614f4c7d819b3"
Nov 25 10:09:59 crc kubenswrapper[4769]: I1125 10:09:59.762269 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.76224942 podStartE2EDuration="2.76224942s" podCreationTimestamp="2025-11-25 10:09:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:09:59.752609103 +0000 UTC m=+1548.337581426" watchObservedRunningTime="2025-11-25 10:09:59.76224942 +0000 UTC m=+1548.347221733"
Nov 25 10:09:59 crc kubenswrapper[4769]: I1125 10:09:59.787157 4769 scope.go:117] "RemoveContainer" containerID="cb46e19254772a8cd0409a33a4f7288ff20128a4c094d2efde9e70ace0eb4c42"
Nov 25 10:09:59 crc kubenswrapper[4769]: I1125 10:09:59.804195 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Nov 25 10:09:59 crc kubenswrapper[4769]: I1125 10:09:59.827804 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"]
Nov 25 10:09:59 crc kubenswrapper[4769]: I1125 10:09:59.849983 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Nov 25 10:09:59 crc kubenswrapper[4769]: E1125 10:09:59.850670 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44a42f95-baf9-4949-a54e-bc3eadadc774" containerName="nova-metadata-log"
Nov 25 10:09:59 crc kubenswrapper[4769]: I1125 10:09:59.850699 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="44a42f95-baf9-4949-a54e-bc3eadadc774" containerName="nova-metadata-log"
Nov 25 10:09:59 crc kubenswrapper[4769]: E1125 10:09:59.850770 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44a42f95-baf9-4949-a54e-bc3eadadc774" containerName="nova-metadata-metadata"
Nov 25 10:09:59 crc kubenswrapper[4769]: I1125 10:09:59.850780 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="44a42f95-baf9-4949-a54e-bc3eadadc774" containerName="nova-metadata-metadata"
Nov 25 10:09:59 crc kubenswrapper[4769]: I1125 10:09:59.851115 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="44a42f95-baf9-4949-a54e-bc3eadadc774" containerName="nova-metadata-metadata"
Nov 25 10:09:59 crc kubenswrapper[4769]: I1125 10:09:59.851137 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="44a42f95-baf9-4949-a54e-bc3eadadc774" containerName="nova-metadata-log"
Nov 25 10:09:59 crc kubenswrapper[4769]: I1125 10:09:59.852761 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 25 10:09:59 crc kubenswrapper[4769]: I1125 10:09:59.861092 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc"
Nov 25 10:09:59 crc kubenswrapper[4769]: I1125 10:09:59.863994 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 25 10:09:59 crc kubenswrapper[4769]: I1125 10:09:59.865498 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Nov 25 10:09:59 crc kubenswrapper[4769]: I1125 10:09:59.965101 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/7c17c0cb-e73b-466d-8fae-ad581561fcb0-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"7c17c0cb-e73b-466d-8fae-ad581561fcb0\") " pod="openstack/nova-metadata-0"
Nov 25 10:09:59 crc kubenswrapper[4769]: I1125 10:09:59.965187 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c17c0cb-e73b-466d-8fae-ad581561fcb0-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"7c17c0cb-e73b-466d-8fae-ad581561fcb0\") " pod="openstack/nova-metadata-0"
Nov 25 10:09:59 crc kubenswrapper[4769]: I1125 10:09:59.965222 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cvrx7\" (UniqueName: \"kubernetes.io/projected/7c17c0cb-e73b-466d-8fae-ad581561fcb0-kube-api-access-cvrx7\") pod \"nova-metadata-0\" (UID: \"7c17c0cb-e73b-466d-8fae-ad581561fcb0\") " pod="openstack/nova-metadata-0"
Nov 25 10:09:59 crc kubenswrapper[4769]: I1125 10:09:59.965293 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7c17c0cb-e73b-466d-8fae-ad581561fcb0-logs\") pod \"nova-metadata-0\" (UID: \"7c17c0cb-e73b-466d-8fae-ad581561fcb0\") " pod="openstack/nova-metadata-0"
Nov 25 10:09:59 crc kubenswrapper[4769]: I1125 10:09:59.965334 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c17c0cb-e73b-466d-8fae-ad581561fcb0-config-data\") pod \"nova-metadata-0\" (UID: \"7c17c0cb-e73b-466d-8fae-ad581561fcb0\") " pod="openstack/nova-metadata-0"
Nov 25 10:10:00 crc kubenswrapper[4769]: I1125 10:10:00.067926 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c17c0cb-e73b-466d-8fae-ad581561fcb0-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"7c17c0cb-e73b-466d-8fae-ad581561fcb0\") " pod="openstack/nova-metadata-0"
Nov 25 10:10:00 crc kubenswrapper[4769]: I1125 10:10:00.068057 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cvrx7\" (UniqueName: \"kubernetes.io/projected/7c17c0cb-e73b-466d-8fae-ad581561fcb0-kube-api-access-cvrx7\") pod \"nova-metadata-0\" (UID: \"7c17c0cb-e73b-466d-8fae-ad581561fcb0\") " pod="openstack/nova-metadata-0"
Nov 25 10:10:00 crc kubenswrapper[4769]: I1125 10:10:00.068229 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7c17c0cb-e73b-466d-8fae-ad581561fcb0-logs\") pod \"nova-metadata-0\" (UID: \"7c17c0cb-e73b-466d-8fae-ad581561fcb0\") " pod="openstack/nova-metadata-0"
Nov 25 10:10:00 crc kubenswrapper[4769]: I1125 10:10:00.068360 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c17c0cb-e73b-466d-8fae-ad581561fcb0-config-data\") pod \"nova-metadata-0\" (UID: \"7c17c0cb-e73b-466d-8fae-ad581561fcb0\") " pod="openstack/nova-metadata-0"
Nov 25 10:10:00 crc kubenswrapper[4769]: I1125 10:10:00.068591 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/7c17c0cb-e73b-466d-8fae-ad581561fcb0-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"7c17c0cb-e73b-466d-8fae-ad581561fcb0\") " pod="openstack/nova-metadata-0"
Nov 25 10:10:00 crc kubenswrapper[4769]: I1125 10:10:00.069241 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7c17c0cb-e73b-466d-8fae-ad581561fcb0-logs\") pod \"nova-metadata-0\" (UID: \"7c17c0cb-e73b-466d-8fae-ad581561fcb0\") " pod="openstack/nova-metadata-0"
Nov 25 10:10:00 crc kubenswrapper[4769]: I1125 10:10:00.074813 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c17c0cb-e73b-466d-8fae-ad581561fcb0-config-data\") pod \"nova-metadata-0\" (UID: \"7c17c0cb-e73b-466d-8fae-ad581561fcb0\") " pod="openstack/nova-metadata-0"
Nov 25 10:10:00 crc kubenswrapper[4769]: I1125 10:10:00.075034 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c17c0cb-e73b-466d-8fae-ad581561fcb0-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"7c17c0cb-e73b-466d-8fae-ad581561fcb0\") " pod="openstack/nova-metadata-0"
Nov 25 10:10:00 crc kubenswrapper[4769]: I1125 10:10:00.084889 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/7c17c0cb-e73b-466d-8fae-ad581561fcb0-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"7c17c0cb-e73b-466d-8fae-ad581561fcb0\") " pod="openstack/nova-metadata-0"
Nov 25 10:10:00 crc kubenswrapper[4769]: I1125 10:10:00.088609 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cvrx7\" (UniqueName: \"kubernetes.io/projected/7c17c0cb-e73b-466d-8fae-ad581561fcb0-kube-api-access-cvrx7\") pod \"nova-metadata-0\" (UID: \"7c17c0cb-e73b-466d-8fae-ad581561fcb0\") " pod="openstack/nova-metadata-0"
Nov 25 10:10:00 crc kubenswrapper[4769]: I1125 10:10:00.175589 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 25 10:10:00 crc kubenswrapper[4769]: I1125 10:10:00.303373 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44a42f95-baf9-4949-a54e-bc3eadadc774" path="/var/lib/kubelet/pods/44a42f95-baf9-4949-a54e-bc3eadadc774/volumes"
Nov 25 10:10:00 crc kubenswrapper[4769]: I1125 10:10:00.737091 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 25 10:10:00 crc kubenswrapper[4769]: I1125 10:10:00.794727 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7c17c0cb-e73b-466d-8fae-ad581561fcb0","Type":"ContainerStarted","Data":"36034d1a644262592fcd67880c9e8cac0bef2764d6f801d08273433ea4895890"}
Nov 25 10:10:01 crc kubenswrapper[4769]: I1125 10:10:01.812865 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7c17c0cb-e73b-466d-8fae-ad581561fcb0","Type":"ContainerStarted","Data":"e55ba193adec33779ba7ea3a5dff3467d1b713820dfbb1236f592579c141dee4"}
Nov 25 10:10:01 crc kubenswrapper[4769]: I1125 10:10:01.813472 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7c17c0cb-e73b-466d-8fae-ad581561fcb0","Type":"ContainerStarted","Data":"cb9ee5e7d853d9cee25165d658f3ce9bd70601a1b2f70c93d8cdd728ed089e8c"}
Nov 25 10:10:01 crc kubenswrapper[4769]: I1125 10:10:01.860907 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.860876923 podStartE2EDuration="2.860876923s" podCreationTimestamp="2025-11-25 10:09:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:10:01.838454269 +0000 UTC m=+1550.423426622" watchObservedRunningTime="2025-11-25 10:10:01.860876923 +0000 UTC m=+1550.445849276"
Nov 25 10:10:03 crc kubenswrapper[4769]: I1125 10:10:03.147499 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Nov 25 10:10:05 crc kubenswrapper[4769]: I1125 10:10:05.176361 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 25 10:10:05 crc kubenswrapper[4769]: I1125 10:10:05.176688 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 25 10:10:07 crc kubenswrapper[4769]: I1125 10:10:07.075649 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 25 10:10:07 crc kubenswrapper[4769]: I1125 10:10:07.076178 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 25 10:10:08 crc kubenswrapper[4769]: I1125 10:10:08.096127 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="3a4d6dd4-fef8-443c-9266-9641e672100e" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.1.2:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 25 10:10:08 crc kubenswrapper[4769]: I1125 10:10:08.097189 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="3a4d6dd4-fef8-443c-9266-9641e672100e" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.1.2:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 25 10:10:08 crc kubenswrapper[4769]: I1125 10:10:08.148195 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Nov 25 10:10:08 crc kubenswrapper[4769]: I1125 10:10:08.182379 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Nov 25 10:10:08 crc kubenswrapper[4769]: I1125 10:10:08.980537 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Nov 25 10:10:10 crc kubenswrapper[4769]: I1125 10:10:10.179977 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Nov 25 10:10:10 crc kubenswrapper[4769]: I1125 10:10:10.180043 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Nov 25 10:10:11 crc kubenswrapper[4769]: I1125 10:10:11.208185 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="7c17c0cb-e73b-466d-8fae-ad581561fcb0" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.1.4:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 25 10:10:11 crc kubenswrapper[4769]: I1125 10:10:11.208256 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="7c17c0cb-e73b-466d-8fae-ad581561fcb0" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.1.4:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 25 10:10:17 crc kubenswrapper[4769]: I1125 10:10:17.100836 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Nov 25 10:10:17 crc kubenswrapper[4769]: I1125 10:10:17.101692 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Nov 25 10:10:17 crc kubenswrapper[4769]: I1125 10:10:17.109105 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Nov 25 10:10:17 crc kubenswrapper[4769]: I1125 10:10:17.114400 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Nov 25 10:10:18 crc kubenswrapper[4769]: I1125 10:10:18.092273 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Nov 25 10:10:18 crc kubenswrapper[4769]: I1125 10:10:18.101545 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Nov 25 10:10:19 crc kubenswrapper[4769]: I1125 10:10:19.003602 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0"
Nov 25 10:10:20 crc kubenswrapper[4769]: I1125 10:10:20.190925 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Nov 25 10:10:20 crc kubenswrapper[4769]: I1125 10:10:20.191631 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Nov 25 10:10:20 crc kubenswrapper[4769]: I1125 10:10:20.203953 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Nov 25 10:10:20 crc kubenswrapper[4769]: I1125 10:10:20.204126 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Nov 25 10:10:22 crc kubenswrapper[4769]: I1125 10:10:22.290334 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 10:10:22 crc kubenswrapper[4769]: I1125 10:10:22.290716 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 10:10:22 crc kubenswrapper[4769]: I1125 10:10:22.290773 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-98mzt"
Nov 25 10:10:22 crc kubenswrapper[4769]: I1125 10:10:22.291874 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"19d7e4719048ead4c6811e0bf09eaeb04f5d9c0b0e2c91bc113c457c1276b7a7"} pod="openshift-machine-config-operator/machine-config-daemon-98mzt" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 25 10:10:22 crc kubenswrapper[4769]: I1125 10:10:22.291956 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" containerID="cri-o://19d7e4719048ead4c6811e0bf09eaeb04f5d9c0b0e2c91bc113c457c1276b7a7" gracePeriod=600
Nov 25 10:10:22 crc kubenswrapper[4769]: E1125 10:10:22.439050 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256"
Nov 25 10:10:23 crc kubenswrapper[4769]: I1125 10:10:23.174094 4769 generic.go:334] "Generic (PLEG): container finished" podID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerID="19d7e4719048ead4c6811e0bf09eaeb04f5d9c0b0e2c91bc113c457c1276b7a7" exitCode=0
Nov 25 10:10:23 crc kubenswrapper[4769]: I1125 10:10:23.174437 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" event={"ID":"d58c71b5-5dc4-45c1-9b58-9740a35d2256","Type":"ContainerDied","Data":"19d7e4719048ead4c6811e0bf09eaeb04f5d9c0b0e2c91bc113c457c1276b7a7"}
Nov 25 10:10:23 crc kubenswrapper[4769]: I1125 10:10:23.174472 4769 scope.go:117] "RemoveContainer" containerID="94adcb75d0d9250faebaf531d11103a1b9f1da8a7a156454e1964c577d865b3a"
Nov 25 10:10:23 crc kubenswrapper[4769]: I1125 10:10:23.175264 4769 scope.go:117] "RemoveContainer" containerID="19d7e4719048ead4c6811e0bf09eaeb04f5d9c0b0e2c91bc113c457c1276b7a7"
Nov 25 10:10:23 crc kubenswrapper[4769]: E1125 10:10:23.175577 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256"
Nov 25 10:10:23 crc kubenswrapper[4769]: I1125 10:10:23.937839 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 25 10:10:23 crc kubenswrapper[4769]: I1125 10:10:23.938309 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="75a23be3-741b-4e4e-a168-4cc37f54073d" containerName="kube-state-metrics" containerID="cri-o://fb0c20459e760fe37e7dd417e2e2c41c270679803c66c362a1a03b343d388596" gracePeriod=30
Nov 25 10:10:24 crc kubenswrapper[4769]: I1125 10:10:24.038015 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-0"]
Nov 25 10:10:24 crc kubenswrapper[4769]: I1125 10:10:24.038231 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/mysqld-exporter-0" podUID="6cbad0ab-1254-496a-8d0c-88fc840cf17e" containerName="mysqld-exporter" containerID="cri-o://e43a8548ed7a0c94c09fcda2049fa0001f813a7af236bfed1823fcdf19e6f902" gracePeriod=30
Nov 25 10:10:24 crc kubenswrapper[4769]: I1125 10:10:24.196675 4769 generic.go:334] "Generic (PLEG): container finished" podID="75a23be3-741b-4e4e-a168-4cc37f54073d" containerID="fb0c20459e760fe37e7dd417e2e2c41c270679803c66c362a1a03b343d388596" exitCode=2
Nov 25 10:10:24 crc kubenswrapper[4769]: I1125 10:10:24.196782 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"75a23be3-741b-4e4e-a168-4cc37f54073d","Type":"ContainerDied","Data":"fb0c20459e760fe37e7dd417e2e2c41c270679803c66c362a1a03b343d388596"}
Nov 25 10:10:24 crc kubenswrapper[4769]: I1125 10:10:24.199512 4769 generic.go:334] "Generic (PLEG): container finished" podID="6cbad0ab-1254-496a-8d0c-88fc840cf17e" containerID="e43a8548ed7a0c94c09fcda2049fa0001f813a7af236bfed1823fcdf19e6f902" exitCode=2
Nov 25 10:10:24 crc kubenswrapper[4769]: I1125 10:10:24.199556 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"6cbad0ab-1254-496a-8d0c-88fc840cf17e","Type":"ContainerDied","Data":"e43a8548ed7a0c94c09fcda2049fa0001f813a7af236bfed1823fcdf19e6f902"}
Nov 25 10:10:24 crc kubenswrapper[4769]: I1125 10:10:24.724641 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Nov 25 10:10:24 crc kubenswrapper[4769]: I1125 10:10:24.735021 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0"
Nov 25 10:10:24 crc kubenswrapper[4769]: I1125 10:10:24.844839 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p8bxf\" (UniqueName: \"kubernetes.io/projected/6cbad0ab-1254-496a-8d0c-88fc840cf17e-kube-api-access-p8bxf\") pod \"6cbad0ab-1254-496a-8d0c-88fc840cf17e\" (UID: \"6cbad0ab-1254-496a-8d0c-88fc840cf17e\") "
Nov 25 10:10:24 crc kubenswrapper[4769]: I1125 10:10:24.845036 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cbad0ab-1254-496a-8d0c-88fc840cf17e-combined-ca-bundle\") pod \"6cbad0ab-1254-496a-8d0c-88fc840cf17e\" (UID: \"6cbad0ab-1254-496a-8d0c-88fc840cf17e\") "
Nov 25 10:10:24 crc kubenswrapper[4769]: I1125 10:10:24.845071 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lskqw\" (UniqueName: \"kubernetes.io/projected/75a23be3-741b-4e4e-a168-4cc37f54073d-kube-api-access-lskqw\") pod \"75a23be3-741b-4e4e-a168-4cc37f54073d\" (UID: \"75a23be3-741b-4e4e-a168-4cc37f54073d\") "
Nov 25 10:10:24 crc kubenswrapper[4769]: I1125 10:10:24.845102 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6cbad0ab-1254-496a-8d0c-88fc840cf17e-config-data\") pod \"6cbad0ab-1254-496a-8d0c-88fc840cf17e\" (UID: \"6cbad0ab-1254-496a-8d0c-88fc840cf17e\") "
Nov 25 10:10:24 crc kubenswrapper[4769]: I1125 10:10:24.858637 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/75a23be3-741b-4e4e-a168-4cc37f54073d-kube-api-access-lskqw" (OuterVolumeSpecName: "kube-api-access-lskqw") pod "75a23be3-741b-4e4e-a168-4cc37f54073d" (UID: "75a23be3-741b-4e4e-a168-4cc37f54073d"). InnerVolumeSpecName "kube-api-access-lskqw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:10:24 crc kubenswrapper[4769]: I1125 10:10:24.859923 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6cbad0ab-1254-496a-8d0c-88fc840cf17e-kube-api-access-p8bxf" (OuterVolumeSpecName: "kube-api-access-p8bxf") pod "6cbad0ab-1254-496a-8d0c-88fc840cf17e" (UID: "6cbad0ab-1254-496a-8d0c-88fc840cf17e"). InnerVolumeSpecName "kube-api-access-p8bxf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:10:24 crc kubenswrapper[4769]: I1125 10:10:24.887241 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6cbad0ab-1254-496a-8d0c-88fc840cf17e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6cbad0ab-1254-496a-8d0c-88fc840cf17e" (UID: "6cbad0ab-1254-496a-8d0c-88fc840cf17e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:10:24 crc kubenswrapper[4769]: I1125 10:10:24.913817 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6cbad0ab-1254-496a-8d0c-88fc840cf17e-config-data" (OuterVolumeSpecName: "config-data") pod "6cbad0ab-1254-496a-8d0c-88fc840cf17e" (UID: "6cbad0ab-1254-496a-8d0c-88fc840cf17e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:10:24 crc kubenswrapper[4769]: I1125 10:10:24.949226 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cbad0ab-1254-496a-8d0c-88fc840cf17e-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 10:10:24 crc kubenswrapper[4769]: I1125 10:10:24.949280 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lskqw\" (UniqueName: \"kubernetes.io/projected/75a23be3-741b-4e4e-a168-4cc37f54073d-kube-api-access-lskqw\") on node \"crc\" DevicePath \"\""
Nov 25 10:10:24 crc kubenswrapper[4769]: I1125 10:10:24.949294 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6cbad0ab-1254-496a-8d0c-88fc840cf17e-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 10:10:24 crc kubenswrapper[4769]: I1125 10:10:24.949304 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p8bxf\" (UniqueName: \"kubernetes.io/projected/6cbad0ab-1254-496a-8d0c-88fc840cf17e-kube-api-access-p8bxf\") on node \"crc\" DevicePath \"\""
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.211150 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"75a23be3-741b-4e4e-a168-4cc37f54073d","Type":"ContainerDied","Data":"1a9b71c7d4810835bc018756aff653f9f7d1e01184b99eb3ca28638b2a8f3c8c"}
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.211456 4769 scope.go:117] "RemoveContainer" containerID="fb0c20459e760fe37e7dd417e2e2c41c270679803c66c362a1a03b343d388596"
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.211202 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.216384 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"6cbad0ab-1254-496a-8d0c-88fc840cf17e","Type":"ContainerDied","Data":"13f1ff57f73652a98cbeb633de1fb3c21add5b57253754deb465bde42627b857"}
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.216470 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0"
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.253406 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.268112 4769 scope.go:117] "RemoveContainer" containerID="e43a8548ed7a0c94c09fcda2049fa0001f813a7af236bfed1823fcdf19e6f902"
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.274554 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.317374 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-0"]
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.330336 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-0"]
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.342931 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 25 10:10:25 crc kubenswrapper[4769]: E1125 10:10:25.343933 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6cbad0ab-1254-496a-8d0c-88fc840cf17e" containerName="mysqld-exporter"
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.343952 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="6cbad0ab-1254-496a-8d0c-88fc840cf17e" containerName="mysqld-exporter"
Nov 25 10:10:25 crc kubenswrapper[4769]: E1125 10:10:25.343987 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75a23be3-741b-4e4e-a168-4cc37f54073d" containerName="kube-state-metrics"
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.343995 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="75a23be3-741b-4e4e-a168-4cc37f54073d" containerName="kube-state-metrics"
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.344237 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="6cbad0ab-1254-496a-8d0c-88fc840cf17e" containerName="mysqld-exporter"
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.344268 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="75a23be3-741b-4e4e-a168-4cc37f54073d" containerName="kube-state-metrics"
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.345311 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.347326 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc"
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.348339 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config"
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.367382 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-stkb8\" (UniqueName: \"kubernetes.io/projected/f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9-kube-api-access-stkb8\") pod \"kube-state-metrics-0\" (UID: \"f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9\") " pod="openstack/kube-state-metrics-0"
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.367438 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9\") " pod="openstack/kube-state-metrics-0"
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.367507 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9\") " pod="openstack/kube-state-metrics-0"
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.367567 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9\") " pod="openstack/kube-state-metrics-0"
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.380106 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.391713 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-0"]
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.393366 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0"
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.398308 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-config-data"
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.398575 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-mysqld-exporter-svc"
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.401713 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"]
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.469204 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9\") " pod="openstack/kube-state-metrics-0"
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.469251 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f7b3065e-13cc-4b73-a8e8-7c0c8a07379d-config-data\") pod \"mysqld-exporter-0\" (UID: \"f7b3065e-13cc-4b73-a8e8-7c0c8a07379d\") " pod="openstack/mysqld-exporter-0"
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.469322 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v7sjt\" (UniqueName: \"kubernetes.io/projected/f7b3065e-13cc-4b73-a8e8-7c0c8a07379d-kube-api-access-v7sjt\") pod \"mysqld-exporter-0\" (UID: \"f7b3065e-13cc-4b73-a8e8-7c0c8a07379d\") " pod="openstack/mysqld-exporter-0"
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.469356 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mysqld-exporter-tls-certs\" (UniqueName: \"kubernetes.io/secret/f7b3065e-13cc-4b73-a8e8-7c0c8a07379d-mysqld-exporter-tls-certs\") pod \"mysqld-exporter-0\" (UID: \"f7b3065e-13cc-4b73-a8e8-7c0c8a07379d\") " pod="openstack/mysqld-exporter-0"
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.469383 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7b3065e-13cc-4b73-a8e8-7c0c8a07379d-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"f7b3065e-13cc-4b73-a8e8-7c0c8a07379d\") " pod="openstack/mysqld-exporter-0"
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.469544 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-stkb8\" (UniqueName: \"kubernetes.io/projected/f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9-kube-api-access-stkb8\") pod \"kube-state-metrics-0\" (UID: \"f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9\") " pod="openstack/kube-state-metrics-0"
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.469669 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9\") " pod="openstack/kube-state-metrics-0"
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.469975 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9\") " pod="openstack/kube-state-metrics-0"
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.473660 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9\") " pod="openstack/kube-state-metrics-0"
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.473696 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9\") " pod="openstack/kube-state-metrics-0"
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.484109 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9\") " pod="openstack/kube-state-metrics-0"
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.485088 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-stkb8\" (UniqueName: \"kubernetes.io/projected/f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9-kube-api-access-stkb8\") pod \"kube-state-metrics-0\" (UID: \"f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9\") " pod="openstack/kube-state-metrics-0"
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.571083 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f7b3065e-13cc-4b73-a8e8-7c0c8a07379d-config-data\") pod \"mysqld-exporter-0\" (UID: \"f7b3065e-13cc-4b73-a8e8-7c0c8a07379d\") " pod="openstack/mysqld-exporter-0"
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.571186 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v7sjt\" (UniqueName: \"kubernetes.io/projected/f7b3065e-13cc-4b73-a8e8-7c0c8a07379d-kube-api-access-v7sjt\") pod \"mysqld-exporter-0\" (UID: \"f7b3065e-13cc-4b73-a8e8-7c0c8a07379d\") " pod="openstack/mysqld-exporter-0"
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.571225 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mysqld-exporter-tls-certs\" (UniqueName: \"kubernetes.io/secret/f7b3065e-13cc-4b73-a8e8-7c0c8a07379d-mysqld-exporter-tls-certs\") pod \"mysqld-exporter-0\" (UID: \"f7b3065e-13cc-4b73-a8e8-7c0c8a07379d\") " pod="openstack/mysqld-exporter-0"
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.571244 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7b3065e-13cc-4b73-a8e8-7c0c8a07379d-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"f7b3065e-13cc-4b73-a8e8-7c0c8a07379d\") " pod="openstack/mysqld-exporter-0"
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.577215 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mysqld-exporter-tls-certs\" (UniqueName: \"kubernetes.io/secret/f7b3065e-13cc-4b73-a8e8-7c0c8a07379d-mysqld-exporter-tls-certs\") pod \"mysqld-exporter-0\" (UID: \"f7b3065e-13cc-4b73-a8e8-7c0c8a07379d\") " pod="openstack/mysqld-exporter-0"
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.577356 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7b3065e-13cc-4b73-a8e8-7c0c8a07379d-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"f7b3065e-13cc-4b73-a8e8-7c0c8a07379d\") " pod="openstack/mysqld-exporter-0"
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.578744 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f7b3065e-13cc-4b73-a8e8-7c0c8a07379d-config-data\") pod \"mysqld-exporter-0\" (UID: \"f7b3065e-13cc-4b73-a8e8-7c0c8a07379d\") " pod="openstack/mysqld-exporter-0"
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.610136 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v7sjt\" (UniqueName: \"kubernetes.io/projected/f7b3065e-13cc-4b73-a8e8-7c0c8a07379d-kube-api-access-v7sjt\") pod \"mysqld-exporter-0\" (UID: \"f7b3065e-13cc-4b73-a8e8-7c0c8a07379d\") " pod="openstack/mysqld-exporter-0"
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.664937 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Nov 25 10:10:25 crc kubenswrapper[4769]: I1125 10:10:25.709496 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0"
Nov 25 10:10:26 crc kubenswrapper[4769]: I1125 10:10:26.160669 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 25 10:10:26 crc kubenswrapper[4769]: I1125 10:10:26.231437 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9","Type":"ContainerStarted","Data":"83eae9d4a8443918994f8f73fe754c83a16580090cd1e4023f65ed437b5bb729"}
Nov 25 10:10:26 crc kubenswrapper[4769]: I1125 10:10:26.250276 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6cbad0ab-1254-496a-8d0c-88fc840cf17e" path="/var/lib/kubelet/pods/6cbad0ab-1254-496a-8d0c-88fc840cf17e/volumes"
Nov 25 10:10:26 crc kubenswrapper[4769]: I1125 10:10:26.251734 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="75a23be3-741b-4e4e-a168-4cc37f54073d" path="/var/lib/kubelet/pods/75a23be3-741b-4e4e-a168-4cc37f54073d/volumes"
Nov 25 10:10:26 crc kubenswrapper[4769]: I1125 10:10:26.301836 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"]
Nov 25 10:10:26 crc kubenswrapper[4769]: I1125 10:10:26.386591 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 10:10:26 crc kubenswrapper[4769]: I1125 10:10:26.386887 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="370b0510-6672-4acc-b393-069cf70f4443" containerName="proxy-httpd" containerID="cri-o://9de712960d357704cf284adea90f9e8be114bf6d46a25d85f5fb25be992292f7" gracePeriod=30
Nov 25 10:10:26 crc kubenswrapper[4769]: I1125 10:10:26.386990 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="370b0510-6672-4acc-b393-069cf70f4443" containerName="sg-core" containerID="cri-o://ca4de0460af9d900786a83fa7a27a15416fbbc0104ed9a67f032bf6e577b57f4" gracePeriod=30
Nov 25 10:10:26 crc kubenswrapper[4769]: I1125 10:10:26.387066 4769
kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="370b0510-6672-4acc-b393-069cf70f4443" containerName="ceilometer-notification-agent" containerID="cri-o://04d7ddd5ed4ce5857962927dbc6c403c9f2cc92190604db4beccd0e2f0dc7da4" gracePeriod=30 Nov 25 10:10:26 crc kubenswrapper[4769]: I1125 10:10:26.387229 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="370b0510-6672-4acc-b393-069cf70f4443" containerName="ceilometer-central-agent" containerID="cri-o://7a3df91621e92af79b7382c95d50a6bfaad4ce55d25017498212b04efc37aeb6" gracePeriod=30 Nov 25 10:10:26 crc kubenswrapper[4769]: E1125 10:10:26.975505 4769 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod370b0510_6672_4acc_b393_069cf70f4443.slice/crio-conmon-7a3df91621e92af79b7382c95d50a6bfaad4ce55d25017498212b04efc37aeb6.scope\": RecentStats: unable to find data in memory cache]" Nov 25 10:10:27 crc kubenswrapper[4769]: I1125 10:10:27.266015 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9","Type":"ContainerStarted","Data":"39fdd3d49d95b573499934de095fcfcb318fa3d8283bb175fb0e3bf40cc83179"} Nov 25 10:10:27 crc kubenswrapper[4769]: I1125 10:10:27.266531 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 25 10:10:27 crc kubenswrapper[4769]: I1125 10:10:27.268953 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"f7b3065e-13cc-4b73-a8e8-7c0c8a07379d","Type":"ContainerStarted","Data":"c71814f106c209a11683e2d9c094a2f2c220a561a6e1befff238e3d07b908e50"} Nov 25 10:10:27 crc kubenswrapper[4769]: I1125 10:10:27.276088 4769 generic.go:334] "Generic (PLEG): container finished" podID="370b0510-6672-4acc-b393-069cf70f4443" containerID="9de712960d357704cf284adea90f9e8be114bf6d46a25d85f5fb25be992292f7" exitCode=0 Nov 25 10:10:27 crc kubenswrapper[4769]: I1125 10:10:27.276155 4769 generic.go:334] "Generic (PLEG): container finished" podID="370b0510-6672-4acc-b393-069cf70f4443" containerID="ca4de0460af9d900786a83fa7a27a15416fbbc0104ed9a67f032bf6e577b57f4" exitCode=2 Nov 25 10:10:27 crc kubenswrapper[4769]: I1125 10:10:27.276169 4769 generic.go:334] "Generic (PLEG): container finished" podID="370b0510-6672-4acc-b393-069cf70f4443" containerID="7a3df91621e92af79b7382c95d50a6bfaad4ce55d25017498212b04efc37aeb6" exitCode=0 Nov 25 10:10:27 crc kubenswrapper[4769]: I1125 10:10:27.276166 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"370b0510-6672-4acc-b393-069cf70f4443","Type":"ContainerDied","Data":"9de712960d357704cf284adea90f9e8be114bf6d46a25d85f5fb25be992292f7"} Nov 25 10:10:27 crc kubenswrapper[4769]: I1125 10:10:27.276239 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"370b0510-6672-4acc-b393-069cf70f4443","Type":"ContainerDied","Data":"ca4de0460af9d900786a83fa7a27a15416fbbc0104ed9a67f032bf6e577b57f4"} Nov 25 10:10:27 crc kubenswrapper[4769]: I1125 10:10:27.276262 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"370b0510-6672-4acc-b393-069cf70f4443","Type":"ContainerDied","Data":"7a3df91621e92af79b7382c95d50a6bfaad4ce55d25017498212b04efc37aeb6"} Nov 25 10:10:27 crc kubenswrapper[4769]: I1125 
10:10:27.286036 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=1.8769601200000001 podStartE2EDuration="2.286015168s" podCreationTimestamp="2025-11-25 10:10:25 +0000 UTC" firstStartedPulling="2025-11-25 10:10:26.166131812 +0000 UTC m=+1574.751104135" lastFinishedPulling="2025-11-25 10:10:26.57518687 +0000 UTC m=+1575.160159183" observedRunningTime="2025-11-25 10:10:27.284761975 +0000 UTC m=+1575.869734328" watchObservedRunningTime="2025-11-25 10:10:27.286015168 +0000 UTC m=+1575.870987481"
Nov 25 10:10:28 crc kubenswrapper[4769]: I1125 10:10:28.293522 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"f7b3065e-13cc-4b73-a8e8-7c0c8a07379d","Type":"ContainerStarted","Data":"7a297833d400e7de14672abc08ce27daa1d57880f70d9fa6f94f8e28163285b5"}
Nov 25 10:10:28 crc kubenswrapper[4769]: I1125 10:10:28.323395 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mysqld-exporter-0" podStartSLOduration=2.843788371 podStartE2EDuration="3.323374688s" podCreationTimestamp="2025-11-25 10:10:25 +0000 UTC" firstStartedPulling="2025-11-25 10:10:26.30605964 +0000 UTC m=+1574.891031953" lastFinishedPulling="2025-11-25 10:10:26.785645967 +0000 UTC m=+1575.370618270" observedRunningTime="2025-11-25 10:10:28.320610337 +0000 UTC m=+1576.905582690" watchObservedRunningTime="2025-11-25 10:10:28.323374688 +0000 UTC m=+1576.908347001"
Nov 25 10:10:29 crc kubenswrapper[4769]: I1125 10:10:29.330268 4769 generic.go:334] "Generic (PLEG): container finished" podID="370b0510-6672-4acc-b393-069cf70f4443" containerID="04d7ddd5ed4ce5857962927dbc6c403c9f2cc92190604db4beccd0e2f0dc7da4" exitCode=0
Nov 25 10:10:29 crc kubenswrapper[4769]: I1125 10:10:29.330830 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"370b0510-6672-4acc-b393-069cf70f4443","Type":"ContainerDied","Data":"04d7ddd5ed4ce5857962927dbc6c403c9f2cc92190604db4beccd0e2f0dc7da4"}
Nov 25 10:10:29 crc kubenswrapper[4769]: I1125 10:10:29.687845 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:10:29 crc kubenswrapper[4769]: I1125 10:10:29.807257 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/370b0510-6672-4acc-b393-069cf70f4443-log-httpd\") pod \"370b0510-6672-4acc-b393-069cf70f4443\" (UID: \"370b0510-6672-4acc-b393-069cf70f4443\") " Nov 25 10:10:29 crc kubenswrapper[4769]: I1125 10:10:29.807348 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/370b0510-6672-4acc-b393-069cf70f4443-scripts\") pod \"370b0510-6672-4acc-b393-069cf70f4443\" (UID: \"370b0510-6672-4acc-b393-069cf70f4443\") " Nov 25 10:10:29 crc kubenswrapper[4769]: I1125 10:10:29.807521 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/370b0510-6672-4acc-b393-069cf70f4443-sg-core-conf-yaml\") pod \"370b0510-6672-4acc-b393-069cf70f4443\" (UID: \"370b0510-6672-4acc-b393-069cf70f4443\") " Nov 25 10:10:29 crc kubenswrapper[4769]: I1125 10:10:29.807605 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/370b0510-6672-4acc-b393-069cf70f4443-run-httpd\") pod \"370b0510-6672-4acc-b393-069cf70f4443\" (UID: \"370b0510-6672-4acc-b393-069cf70f4443\") " Nov 25 10:10:29 crc kubenswrapper[4769]: I1125 10:10:29.807642 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/370b0510-6672-4acc-b393-069cf70f4443-combined-ca-bundle\") pod \"370b0510-6672-4acc-b393-069cf70f4443\" (UID: \"370b0510-6672-4acc-b393-069cf70f4443\") " Nov 25 10:10:29 crc kubenswrapper[4769]: I1125 10:10:29.807748 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kckb2\" (UniqueName: \"kubernetes.io/projected/370b0510-6672-4acc-b393-069cf70f4443-kube-api-access-kckb2\") pod \"370b0510-6672-4acc-b393-069cf70f4443\" (UID: \"370b0510-6672-4acc-b393-069cf70f4443\") " Nov 25 10:10:29 crc kubenswrapper[4769]: I1125 10:10:29.807833 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/370b0510-6672-4acc-b393-069cf70f4443-config-data\") pod \"370b0510-6672-4acc-b393-069cf70f4443\" (UID: \"370b0510-6672-4acc-b393-069cf70f4443\") " Nov 25 10:10:29 crc kubenswrapper[4769]: I1125 10:10:29.808490 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/370b0510-6672-4acc-b393-069cf70f4443-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "370b0510-6672-4acc-b393-069cf70f4443" (UID: "370b0510-6672-4acc-b393-069cf70f4443"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:10:29 crc kubenswrapper[4769]: I1125 10:10:29.808985 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/370b0510-6672-4acc-b393-069cf70f4443-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "370b0510-6672-4acc-b393-069cf70f4443" (UID: "370b0510-6672-4acc-b393-069cf70f4443"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:10:29 crc kubenswrapper[4769]: I1125 10:10:29.809187 4769 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/370b0510-6672-4acc-b393-069cf70f4443-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 10:10:29 crc kubenswrapper[4769]: I1125 10:10:29.809209 4769 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/370b0510-6672-4acc-b393-069cf70f4443-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 10:10:29 crc kubenswrapper[4769]: I1125 10:10:29.818191 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/370b0510-6672-4acc-b393-069cf70f4443-kube-api-access-kckb2" (OuterVolumeSpecName: "kube-api-access-kckb2") pod "370b0510-6672-4acc-b393-069cf70f4443" (UID: "370b0510-6672-4acc-b393-069cf70f4443"). InnerVolumeSpecName "kube-api-access-kckb2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:10:29 crc kubenswrapper[4769]: I1125 10:10:29.839361 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/370b0510-6672-4acc-b393-069cf70f4443-scripts" (OuterVolumeSpecName: "scripts") pod "370b0510-6672-4acc-b393-069cf70f4443" (UID: "370b0510-6672-4acc-b393-069cf70f4443"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:10:29 crc kubenswrapper[4769]: I1125 10:10:29.853863 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/370b0510-6672-4acc-b393-069cf70f4443-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "370b0510-6672-4acc-b393-069cf70f4443" (UID: "370b0510-6672-4acc-b393-069cf70f4443"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:10:29 crc kubenswrapper[4769]: I1125 10:10:29.911065 4769 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/370b0510-6672-4acc-b393-069cf70f4443-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 10:10:29 crc kubenswrapper[4769]: I1125 10:10:29.911315 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kckb2\" (UniqueName: \"kubernetes.io/projected/370b0510-6672-4acc-b393-069cf70f4443-kube-api-access-kckb2\") on node \"crc\" DevicePath \"\"" Nov 25 10:10:29 crc kubenswrapper[4769]: I1125 10:10:29.911326 4769 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/370b0510-6672-4acc-b393-069cf70f4443-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:10:29 crc kubenswrapper[4769]: I1125 10:10:29.936476 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/370b0510-6672-4acc-b393-069cf70f4443-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "370b0510-6672-4acc-b393-069cf70f4443" (UID: "370b0510-6672-4acc-b393-069cf70f4443"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:10:29 crc kubenswrapper[4769]: I1125 10:10:29.978875 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/370b0510-6672-4acc-b393-069cf70f4443-config-data" (OuterVolumeSpecName: "config-data") pod "370b0510-6672-4acc-b393-069cf70f4443" (UID: "370b0510-6672-4acc-b393-069cf70f4443"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.014062 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/370b0510-6672-4acc-b393-069cf70f4443-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.014314 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/370b0510-6672-4acc-b393-069cf70f4443-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.346586 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"370b0510-6672-4acc-b393-069cf70f4443","Type":"ContainerDied","Data":"0501c3e438c32c0ceed429c7c01881f029dbab877905be0d64d3c938561ecb41"} Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.346639 4769 scope.go:117] "RemoveContainer" containerID="9de712960d357704cf284adea90f9e8be114bf6d46a25d85f5fb25be992292f7" Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.346719 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.378357 4769 scope.go:117] "RemoveContainer" containerID="ca4de0460af9d900786a83fa7a27a15416fbbc0104ed9a67f032bf6e577b57f4" Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.410432 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.436916 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.442211 4769 scope.go:117] "RemoveContainer" containerID="04d7ddd5ed4ce5857962927dbc6c403c9f2cc92190604db4beccd0e2f0dc7da4" Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.454486 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:10:30 crc kubenswrapper[4769]: E1125 10:10:30.455330 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="370b0510-6672-4acc-b393-069cf70f4443" containerName="ceilometer-central-agent" Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.455364 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="370b0510-6672-4acc-b393-069cf70f4443" containerName="ceilometer-central-agent" Nov 25 10:10:30 crc kubenswrapper[4769]: E1125 10:10:30.455422 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="370b0510-6672-4acc-b393-069cf70f4443" containerName="proxy-httpd" Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.455435 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="370b0510-6672-4acc-b393-069cf70f4443" containerName="proxy-httpd" Nov 25 10:10:30 crc kubenswrapper[4769]: E1125 10:10:30.455466 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="370b0510-6672-4acc-b393-069cf70f4443" containerName="sg-core" Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.455479 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="370b0510-6672-4acc-b393-069cf70f4443" containerName="sg-core" Nov 25 10:10:30 crc kubenswrapper[4769]: E1125 10:10:30.455530 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="370b0510-6672-4acc-b393-069cf70f4443" containerName="ceilometer-notification-agent" Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.455543 4769 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="370b0510-6672-4acc-b393-069cf70f4443" containerName="ceilometer-notification-agent" Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.455956 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="370b0510-6672-4acc-b393-069cf70f4443" containerName="proxy-httpd" Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.456007 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="370b0510-6672-4acc-b393-069cf70f4443" containerName="ceilometer-central-agent" Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.456049 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="370b0510-6672-4acc-b393-069cf70f4443" containerName="ceilometer-notification-agent" Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.456082 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="370b0510-6672-4acc-b393-069cf70f4443" containerName="sg-core" Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.460997 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.468430 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.468687 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.468929 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.469658 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.498239 4769 scope.go:117] "RemoveContainer" containerID="7a3df91621e92af79b7382c95d50a6bfaad4ce55d25017498212b04efc37aeb6" Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.527955 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/16b8c066-592e-4ec8-99e9-aca97ded614a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"16b8c066-592e-4ec8-99e9-aca97ded614a\") " pod="openstack/ceilometer-0" Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.528147 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/16b8c066-592e-4ec8-99e9-aca97ded614a-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"16b8c066-592e-4ec8-99e9-aca97ded614a\") " pod="openstack/ceilometer-0" Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.528323 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-czfg7\" (UniqueName: \"kubernetes.io/projected/16b8c066-592e-4ec8-99e9-aca97ded614a-kube-api-access-czfg7\") pod \"ceilometer-0\" (UID: \"16b8c066-592e-4ec8-99e9-aca97ded614a\") " pod="openstack/ceilometer-0" Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.528403 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/16b8c066-592e-4ec8-99e9-aca97ded614a-scripts\") pod \"ceilometer-0\" (UID: \"16b8c066-592e-4ec8-99e9-aca97ded614a\") " pod="openstack/ceilometer-0" Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.528534 4769 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16b8c066-592e-4ec8-99e9-aca97ded614a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"16b8c066-592e-4ec8-99e9-aca97ded614a\") " pod="openstack/ceilometer-0" Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.528777 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16b8c066-592e-4ec8-99e9-aca97ded614a-config-data\") pod \"ceilometer-0\" (UID: \"16b8c066-592e-4ec8-99e9-aca97ded614a\") " pod="openstack/ceilometer-0" Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.528829 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/16b8c066-592e-4ec8-99e9-aca97ded614a-log-httpd\") pod \"ceilometer-0\" (UID: \"16b8c066-592e-4ec8-99e9-aca97ded614a\") " pod="openstack/ceilometer-0" Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.529232 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/16b8c066-592e-4ec8-99e9-aca97ded614a-run-httpd\") pod \"ceilometer-0\" (UID: \"16b8c066-592e-4ec8-99e9-aca97ded614a\") " pod="openstack/ceilometer-0" Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.631063 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-czfg7\" (UniqueName: \"kubernetes.io/projected/16b8c066-592e-4ec8-99e9-aca97ded614a-kube-api-access-czfg7\") pod \"ceilometer-0\" (UID: \"16b8c066-592e-4ec8-99e9-aca97ded614a\") " pod="openstack/ceilometer-0" Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.631110 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/16b8c066-592e-4ec8-99e9-aca97ded614a-scripts\") pod \"ceilometer-0\" (UID: \"16b8c066-592e-4ec8-99e9-aca97ded614a\") " pod="openstack/ceilometer-0" Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.631173 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16b8c066-592e-4ec8-99e9-aca97ded614a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"16b8c066-592e-4ec8-99e9-aca97ded614a\") " pod="openstack/ceilometer-0" Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.631234 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16b8c066-592e-4ec8-99e9-aca97ded614a-config-data\") pod \"ceilometer-0\" (UID: \"16b8c066-592e-4ec8-99e9-aca97ded614a\") " pod="openstack/ceilometer-0" Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.631255 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/16b8c066-592e-4ec8-99e9-aca97ded614a-log-httpd\") pod \"ceilometer-0\" (UID: \"16b8c066-592e-4ec8-99e9-aca97ded614a\") " pod="openstack/ceilometer-0" Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.631342 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/16b8c066-592e-4ec8-99e9-aca97ded614a-run-httpd\") pod \"ceilometer-0\" (UID: \"16b8c066-592e-4ec8-99e9-aca97ded614a\") " pod="openstack/ceilometer-0" Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 
10:10:30.631410 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/16b8c066-592e-4ec8-99e9-aca97ded614a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"16b8c066-592e-4ec8-99e9-aca97ded614a\") " pod="openstack/ceilometer-0" Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.631446 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/16b8c066-592e-4ec8-99e9-aca97ded614a-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"16b8c066-592e-4ec8-99e9-aca97ded614a\") " pod="openstack/ceilometer-0" Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.635219 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/16b8c066-592e-4ec8-99e9-aca97ded614a-run-httpd\") pod \"ceilometer-0\" (UID: \"16b8c066-592e-4ec8-99e9-aca97ded614a\") " pod="openstack/ceilometer-0" Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.635549 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/16b8c066-592e-4ec8-99e9-aca97ded614a-log-httpd\") pod \"ceilometer-0\" (UID: \"16b8c066-592e-4ec8-99e9-aca97ded614a\") " pod="openstack/ceilometer-0" Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.642600 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/16b8c066-592e-4ec8-99e9-aca97ded614a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"16b8c066-592e-4ec8-99e9-aca97ded614a\") " pod="openstack/ceilometer-0" Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.643017 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16b8c066-592e-4ec8-99e9-aca97ded614a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"16b8c066-592e-4ec8-99e9-aca97ded614a\") " pod="openstack/ceilometer-0" Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.643260 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/16b8c066-592e-4ec8-99e9-aca97ded614a-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"16b8c066-592e-4ec8-99e9-aca97ded614a\") " pod="openstack/ceilometer-0" Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.649543 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/16b8c066-592e-4ec8-99e9-aca97ded614a-scripts\") pod \"ceilometer-0\" (UID: \"16b8c066-592e-4ec8-99e9-aca97ded614a\") " pod="openstack/ceilometer-0" Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.656488 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16b8c066-592e-4ec8-99e9-aca97ded614a-config-data\") pod \"ceilometer-0\" (UID: \"16b8c066-592e-4ec8-99e9-aca97ded614a\") " pod="openstack/ceilometer-0" Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.656810 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-czfg7\" (UniqueName: \"kubernetes.io/projected/16b8c066-592e-4ec8-99e9-aca97ded614a-kube-api-access-czfg7\") pod \"ceilometer-0\" (UID: \"16b8c066-592e-4ec8-99e9-aca97ded614a\") " pod="openstack/ceilometer-0" Nov 25 10:10:30 crc kubenswrapper[4769]: I1125 10:10:30.804700 4769 util.go:30] "No sandbox for 
pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:10:31 crc kubenswrapper[4769]: I1125 10:10:31.340034 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:10:31 crc kubenswrapper[4769]: I1125 10:10:31.368223 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"16b8c066-592e-4ec8-99e9-aca97ded614a","Type":"ContainerStarted","Data":"2dc226269798ae68646ba71f6ba41bbfefc4f5cd9662a6f4e3b0eaa724e0c132"} Nov 25 10:10:31 crc kubenswrapper[4769]: I1125 10:10:31.491899 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-sync-5tt9b"] Nov 25 10:10:31 crc kubenswrapper[4769]: I1125 10:10:31.504555 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-sync-5tt9b"] Nov 25 10:10:31 crc kubenswrapper[4769]: I1125 10:10:31.604158 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-sync-gvtk6"] Nov 25 10:10:31 crc kubenswrapper[4769]: I1125 10:10:31.607413 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-gvtk6" Nov 25 10:10:31 crc kubenswrapper[4769]: I1125 10:10:31.628648 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-gvtk6"] Nov 25 10:10:31 crc kubenswrapper[4769]: I1125 10:10:31.681854 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/495df49b-2ea1-4ee3-8d6d-5607d639d308-config-data\") pod \"heat-db-sync-gvtk6\" (UID: \"495df49b-2ea1-4ee3-8d6d-5607d639d308\") " pod="openstack/heat-db-sync-gvtk6" Nov 25 10:10:31 crc kubenswrapper[4769]: I1125 10:10:31.681940 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v8zpk\" (UniqueName: \"kubernetes.io/projected/495df49b-2ea1-4ee3-8d6d-5607d639d308-kube-api-access-v8zpk\") pod \"heat-db-sync-gvtk6\" (UID: \"495df49b-2ea1-4ee3-8d6d-5607d639d308\") " pod="openstack/heat-db-sync-gvtk6" Nov 25 10:10:31 crc kubenswrapper[4769]: I1125 10:10:31.682023 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/495df49b-2ea1-4ee3-8d6d-5607d639d308-combined-ca-bundle\") pod \"heat-db-sync-gvtk6\" (UID: \"495df49b-2ea1-4ee3-8d6d-5607d639d308\") " pod="openstack/heat-db-sync-gvtk6" Nov 25 10:10:31 crc kubenswrapper[4769]: I1125 10:10:31.784656 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/495df49b-2ea1-4ee3-8d6d-5607d639d308-config-data\") pod \"heat-db-sync-gvtk6\" (UID: \"495df49b-2ea1-4ee3-8d6d-5607d639d308\") " pod="openstack/heat-db-sync-gvtk6" Nov 25 10:10:31 crc kubenswrapper[4769]: I1125 10:10:31.784724 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v8zpk\" (UniqueName: \"kubernetes.io/projected/495df49b-2ea1-4ee3-8d6d-5607d639d308-kube-api-access-v8zpk\") pod \"heat-db-sync-gvtk6\" (UID: \"495df49b-2ea1-4ee3-8d6d-5607d639d308\") " pod="openstack/heat-db-sync-gvtk6" Nov 25 10:10:31 crc kubenswrapper[4769]: I1125 10:10:31.784746 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/495df49b-2ea1-4ee3-8d6d-5607d639d308-combined-ca-bundle\") pod \"heat-db-sync-gvtk6\" (UID: 
\"495df49b-2ea1-4ee3-8d6d-5607d639d308\") " pod="openstack/heat-db-sync-gvtk6" Nov 25 10:10:31 crc kubenswrapper[4769]: I1125 10:10:31.797552 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/495df49b-2ea1-4ee3-8d6d-5607d639d308-combined-ca-bundle\") pod \"heat-db-sync-gvtk6\" (UID: \"495df49b-2ea1-4ee3-8d6d-5607d639d308\") " pod="openstack/heat-db-sync-gvtk6" Nov 25 10:10:31 crc kubenswrapper[4769]: I1125 10:10:31.797843 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/495df49b-2ea1-4ee3-8d6d-5607d639d308-config-data\") pod \"heat-db-sync-gvtk6\" (UID: \"495df49b-2ea1-4ee3-8d6d-5607d639d308\") " pod="openstack/heat-db-sync-gvtk6" Nov 25 10:10:31 crc kubenswrapper[4769]: I1125 10:10:31.803460 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v8zpk\" (UniqueName: \"kubernetes.io/projected/495df49b-2ea1-4ee3-8d6d-5607d639d308-kube-api-access-v8zpk\") pod \"heat-db-sync-gvtk6\" (UID: \"495df49b-2ea1-4ee3-8d6d-5607d639d308\") " pod="openstack/heat-db-sync-gvtk6" Nov 25 10:10:31 crc kubenswrapper[4769]: I1125 10:10:31.929755 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-gvtk6" Nov 25 10:10:32 crc kubenswrapper[4769]: I1125 10:10:32.257515 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="370b0510-6672-4acc-b393-069cf70f4443" path="/var/lib/kubelet/pods/370b0510-6672-4acc-b393-069cf70f4443/volumes" Nov 25 10:10:32 crc kubenswrapper[4769]: I1125 10:10:32.325957 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="639faafa-26cf-4b2e-831b-bc95e327cb3b" path="/var/lib/kubelet/pods/639faafa-26cf-4b2e-831b-bc95e327cb3b/volumes" Nov 25 10:10:32 crc kubenswrapper[4769]: I1125 10:10:32.479269 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-gvtk6"] Nov 25 10:10:32 crc kubenswrapper[4769]: W1125 10:10:32.480004 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod495df49b_2ea1_4ee3_8d6d_5607d639d308.slice/crio-669ef02bbe789a8d79e3dfc6fb816b20e269649834ab58eed3e4abc19d2be6c0 WatchSource:0}: Error finding container 669ef02bbe789a8d79e3dfc6fb816b20e269649834ab58eed3e4abc19d2be6c0: Status 404 returned error can't find the container with id 669ef02bbe789a8d79e3dfc6fb816b20e269649834ab58eed3e4abc19d2be6c0 Nov 25 10:10:33 crc kubenswrapper[4769]: I1125 10:10:33.402724 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-gvtk6" event={"ID":"495df49b-2ea1-4ee3-8d6d-5607d639d308","Type":"ContainerStarted","Data":"669ef02bbe789a8d79e3dfc6fb816b20e269649834ab58eed3e4abc19d2be6c0"} Nov 25 10:10:34 crc kubenswrapper[4769]: I1125 10:10:34.153638 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 10:10:34 crc kubenswrapper[4769]: I1125 10:10:34.253312 4769 scope.go:117] "RemoveContainer" containerID="19d7e4719048ead4c6811e0bf09eaeb04f5d9c0b0e2c91bc113c457c1276b7a7" Nov 25 10:10:34 crc kubenswrapper[4769]: E1125 10:10:34.257145 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:10:34 crc kubenswrapper[4769]: I1125 10:10:34.322318 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 10:10:34 crc kubenswrapper[4769]: I1125 10:10:34.687899 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:10:35 crc kubenswrapper[4769]: I1125 10:10:35.865278 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 25 10:10:36 crc kubenswrapper[4769]: I1125 10:10:36.471256 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"16b8c066-592e-4ec8-99e9-aca97ded614a","Type":"ContainerStarted","Data":"98a2022d83ec9a1208424102ad5dd54d2b33d8ff86b81fbb2572f14aa95ee530"} Nov 25 10:10:40 crc kubenswrapper[4769]: I1125 10:10:40.982957 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="981369ae-93f2-4c25-bdea-d3d89686b0d5" containerName="rabbitmq" containerID="cri-o://e89816873ee41f43364f9d484d1ce5af7f6013fdc49031678da977a414617fba" gracePeriod=604794 Nov 25 10:10:41 crc kubenswrapper[4769]: I1125 10:10:41.104716 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="4857aaf8-4133-4c20-bc8c-d4d195091176" containerName="rabbitmq" containerID="cri-o://1d15ed991d65b7745c50096bb73a804672d4002840b23053fddc04ecdab15ac0" gracePeriod=604794 Nov 25 10:10:41 crc kubenswrapper[4769]: I1125 10:10:41.546859 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"16b8c066-592e-4ec8-99e9-aca97ded614a","Type":"ContainerStarted","Data":"a79adb94133f4d1fe092a1914b1650b3f94e6d98d4824bd6fbc5b4b6267f98f2"} Nov 25 10:10:42 crc kubenswrapper[4769]: I1125 10:10:42.577512 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"16b8c066-592e-4ec8-99e9-aca97ded614a","Type":"ContainerStarted","Data":"f4b45f2d3cb3749dd7b9d56784f5b99c70c299ffcc72226dda7225dffd99cf2a"} Nov 25 10:10:44 crc kubenswrapper[4769]: I1125 10:10:44.609938 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"16b8c066-592e-4ec8-99e9-aca97ded614a","Type":"ContainerStarted","Data":"679eb47860e175f55b98b9697c77f7e76a82d69d926e1bd0bc724390556d6d63"} Nov 25 10:10:44 crc kubenswrapper[4769]: I1125 10:10:44.611133 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 10:10:44 crc kubenswrapper[4769]: I1125 10:10:44.610182 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="16b8c066-592e-4ec8-99e9-aca97ded614a" containerName="proxy-httpd" containerID="cri-o://679eb47860e175f55b98b9697c77f7e76a82d69d926e1bd0bc724390556d6d63" gracePeriod=30 Nov 25 10:10:44 crc kubenswrapper[4769]: I1125 10:10:44.610107 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="16b8c066-592e-4ec8-99e9-aca97ded614a" containerName="ceilometer-central-agent" containerID="cri-o://98a2022d83ec9a1208424102ad5dd54d2b33d8ff86b81fbb2572f14aa95ee530" gracePeriod=30 Nov 25 10:10:44 crc kubenswrapper[4769]: I1125 10:10:44.610256 4769 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="16b8c066-592e-4ec8-99e9-aca97ded614a" containerName="ceilometer-notification-agent" containerID="cri-o://a79adb94133f4d1fe092a1914b1650b3f94e6d98d4824bd6fbc5b4b6267f98f2" gracePeriod=30 Nov 25 10:10:44 crc kubenswrapper[4769]: I1125 10:10:44.610295 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="16b8c066-592e-4ec8-99e9-aca97ded614a" containerName="sg-core" containerID="cri-o://f4b45f2d3cb3749dd7b9d56784f5b99c70c299ffcc72226dda7225dffd99cf2a" gracePeriod=30 Nov 25 10:10:44 crc kubenswrapper[4769]: I1125 10:10:44.652497 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.988012468 podStartE2EDuration="14.652462317s" podCreationTimestamp="2025-11-25 10:10:30 +0000 UTC" firstStartedPulling="2025-11-25 10:10:31.354428142 +0000 UTC m=+1579.939400465" lastFinishedPulling="2025-11-25 10:10:44.018878001 +0000 UTC m=+1592.603850314" observedRunningTime="2025-11-25 10:10:44.639193656 +0000 UTC m=+1593.224165989" watchObservedRunningTime="2025-11-25 10:10:44.652462317 +0000 UTC m=+1593.237434630" Nov 25 10:10:44 crc kubenswrapper[4769]: I1125 10:10:44.808602 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="981369ae-93f2-4c25-bdea-d3d89686b0d5" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.132:5671: connect: connection refused" Nov 25 10:10:45 crc kubenswrapper[4769]: I1125 10:10:45.107984 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="4857aaf8-4133-4c20-bc8c-d4d195091176" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.133:5671: connect: connection refused" Nov 25 10:10:45 crc kubenswrapper[4769]: I1125 10:10:45.628758 4769 generic.go:334] "Generic (PLEG): container finished" podID="16b8c066-592e-4ec8-99e9-aca97ded614a" containerID="f4b45f2d3cb3749dd7b9d56784f5b99c70c299ffcc72226dda7225dffd99cf2a" exitCode=2 Nov 25 10:10:45 crc kubenswrapper[4769]: I1125 10:10:45.629274 4769 generic.go:334] "Generic (PLEG): container finished" podID="16b8c066-592e-4ec8-99e9-aca97ded614a" containerID="a79adb94133f4d1fe092a1914b1650b3f94e6d98d4824bd6fbc5b4b6267f98f2" exitCode=0 Nov 25 10:10:45 crc kubenswrapper[4769]: I1125 10:10:45.628870 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"16b8c066-592e-4ec8-99e9-aca97ded614a","Type":"ContainerDied","Data":"f4b45f2d3cb3749dd7b9d56784f5b99c70c299ffcc72226dda7225dffd99cf2a"} Nov 25 10:10:45 crc kubenswrapper[4769]: I1125 10:10:45.629335 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"16b8c066-592e-4ec8-99e9-aca97ded614a","Type":"ContainerDied","Data":"a79adb94133f4d1fe092a1914b1650b3f94e6d98d4824bd6fbc5b4b6267f98f2"} Nov 25 10:10:47 crc kubenswrapper[4769]: I1125 10:10:47.237557 4769 scope.go:117] "RemoveContainer" containerID="19d7e4719048ead4c6811e0bf09eaeb04f5d9c0b0e2c91bc113c457c1276b7a7" Nov 25 10:10:47 crc kubenswrapper[4769]: E1125 10:10:47.237843 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:10:47 crc kubenswrapper[4769]: I1125 10:10:47.663373 4769 generic.go:334] "Generic (PLEG): container finished" podID="981369ae-93f2-4c25-bdea-d3d89686b0d5" containerID="e89816873ee41f43364f9d484d1ce5af7f6013fdc49031678da977a414617fba" exitCode=0 Nov 25 10:10:47 crc kubenswrapper[4769]: I1125 10:10:47.663603 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"981369ae-93f2-4c25-bdea-d3d89686b0d5","Type":"ContainerDied","Data":"e89816873ee41f43364f9d484d1ce5af7f6013fdc49031678da977a414617fba"} Nov 25 10:10:47 crc kubenswrapper[4769]: I1125 10:10:47.667091 4769 generic.go:334] "Generic (PLEG): container finished" podID="4857aaf8-4133-4c20-bc8c-d4d195091176" containerID="1d15ed991d65b7745c50096bb73a804672d4002840b23053fddc04ecdab15ac0" exitCode=0 Nov 25 10:10:47 crc kubenswrapper[4769]: I1125 10:10:47.667146 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"4857aaf8-4133-4c20-bc8c-d4d195091176","Type":"ContainerDied","Data":"1d15ed991d65b7745c50096bb73a804672d4002840b23053fddc04ecdab15ac0"} Nov 25 10:10:57 crc kubenswrapper[4769]: I1125 10:10:57.761595 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-68df85789f-xbxtj"] Nov 25 10:10:57 crc kubenswrapper[4769]: I1125 10:10:57.763932 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68df85789f-xbxtj" Nov 25 10:10:57 crc kubenswrapper[4769]: I1125 10:10:57.779253 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-68df85789f-xbxtj"] Nov 25 10:10:57 crc kubenswrapper[4769]: I1125 10:10:57.835891 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Nov 25 10:10:57 crc kubenswrapper[4769]: I1125 10:10:57.867006 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/891399fa-746b-40b9-b3b4-a3a7afe40aa6-dns-svc\") pod \"dnsmasq-dns-68df85789f-xbxtj\" (UID: \"891399fa-746b-40b9-b3b4-a3a7afe40aa6\") " pod="openstack/dnsmasq-dns-68df85789f-xbxtj" Nov 25 10:10:57 crc kubenswrapper[4769]: I1125 10:10:57.867102 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/891399fa-746b-40b9-b3b4-a3a7afe40aa6-ovsdbserver-nb\") pod \"dnsmasq-dns-68df85789f-xbxtj\" (UID: \"891399fa-746b-40b9-b3b4-a3a7afe40aa6\") " pod="openstack/dnsmasq-dns-68df85789f-xbxtj" Nov 25 10:10:57 crc kubenswrapper[4769]: I1125 10:10:57.867191 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/891399fa-746b-40b9-b3b4-a3a7afe40aa6-ovsdbserver-sb\") pod \"dnsmasq-dns-68df85789f-xbxtj\" (UID: \"891399fa-746b-40b9-b3b4-a3a7afe40aa6\") " pod="openstack/dnsmasq-dns-68df85789f-xbxtj" Nov 25 10:10:57 crc kubenswrapper[4769]: I1125 10:10:57.867291 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/891399fa-746b-40b9-b3b4-a3a7afe40aa6-config\") pod \"dnsmasq-dns-68df85789f-xbxtj\" (UID: \"891399fa-746b-40b9-b3b4-a3a7afe40aa6\") " pod="openstack/dnsmasq-dns-68df85789f-xbxtj" Nov 25 10:10:57 crc kubenswrapper[4769]: 
I1125 10:10:57.867316 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b8x4k\" (UniqueName: \"kubernetes.io/projected/891399fa-746b-40b9-b3b4-a3a7afe40aa6-kube-api-access-b8x4k\") pod \"dnsmasq-dns-68df85789f-xbxtj\" (UID: \"891399fa-746b-40b9-b3b4-a3a7afe40aa6\") " pod="openstack/dnsmasq-dns-68df85789f-xbxtj" Nov 25 10:10:57 crc kubenswrapper[4769]: I1125 10:10:57.867337 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/891399fa-746b-40b9-b3b4-a3a7afe40aa6-dns-swift-storage-0\") pod \"dnsmasq-dns-68df85789f-xbxtj\" (UID: \"891399fa-746b-40b9-b3b4-a3a7afe40aa6\") " pod="openstack/dnsmasq-dns-68df85789f-xbxtj" Nov 25 10:10:57 crc kubenswrapper[4769]: I1125 10:10:57.867357 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/891399fa-746b-40b9-b3b4-a3a7afe40aa6-openstack-edpm-ipam\") pod \"dnsmasq-dns-68df85789f-xbxtj\" (UID: \"891399fa-746b-40b9-b3b4-a3a7afe40aa6\") " pod="openstack/dnsmasq-dns-68df85789f-xbxtj" Nov 25 10:10:57 crc kubenswrapper[4769]: I1125 10:10:57.970083 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/891399fa-746b-40b9-b3b4-a3a7afe40aa6-ovsdbserver-sb\") pod \"dnsmasq-dns-68df85789f-xbxtj\" (UID: \"891399fa-746b-40b9-b3b4-a3a7afe40aa6\") " pod="openstack/dnsmasq-dns-68df85789f-xbxtj" Nov 25 10:10:57 crc kubenswrapper[4769]: I1125 10:10:57.970228 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/891399fa-746b-40b9-b3b4-a3a7afe40aa6-config\") pod \"dnsmasq-dns-68df85789f-xbxtj\" (UID: \"891399fa-746b-40b9-b3b4-a3a7afe40aa6\") " pod="openstack/dnsmasq-dns-68df85789f-xbxtj" Nov 25 10:10:57 crc kubenswrapper[4769]: I1125 10:10:57.970257 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b8x4k\" (UniqueName: \"kubernetes.io/projected/891399fa-746b-40b9-b3b4-a3a7afe40aa6-kube-api-access-b8x4k\") pod \"dnsmasq-dns-68df85789f-xbxtj\" (UID: \"891399fa-746b-40b9-b3b4-a3a7afe40aa6\") " pod="openstack/dnsmasq-dns-68df85789f-xbxtj" Nov 25 10:10:57 crc kubenswrapper[4769]: I1125 10:10:57.970284 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/891399fa-746b-40b9-b3b4-a3a7afe40aa6-dns-swift-storage-0\") pod \"dnsmasq-dns-68df85789f-xbxtj\" (UID: \"891399fa-746b-40b9-b3b4-a3a7afe40aa6\") " pod="openstack/dnsmasq-dns-68df85789f-xbxtj" Nov 25 10:10:57 crc kubenswrapper[4769]: I1125 10:10:57.970311 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/891399fa-746b-40b9-b3b4-a3a7afe40aa6-openstack-edpm-ipam\") pod \"dnsmasq-dns-68df85789f-xbxtj\" (UID: \"891399fa-746b-40b9-b3b4-a3a7afe40aa6\") " pod="openstack/dnsmasq-dns-68df85789f-xbxtj" Nov 25 10:10:57 crc kubenswrapper[4769]: I1125 10:10:57.970360 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/891399fa-746b-40b9-b3b4-a3a7afe40aa6-dns-svc\") pod \"dnsmasq-dns-68df85789f-xbxtj\" (UID: \"891399fa-746b-40b9-b3b4-a3a7afe40aa6\") " pod="openstack/dnsmasq-dns-68df85789f-xbxtj" 
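Aside (not part of the captured stream): every entry above is a systemd-journal prefix ("Nov 25 10:10:57 crc kubenswrapper[4769]:") wrapped around a klog header (severity letter plus MMDD date, pid, file:line) and a structured key="value" message. A minimal Go sketch for splitting such lines into fields; the regexp layout is assumed solely from what this capture shows, not from any klog API, and the sample line is quoted verbatim from the entries nearby:

package main

import (
	"fmt"
	"regexp"
)

// journal prefix: "Nov 25 10:10:58 crc kubenswrapper[4769]: "
// klog header:    "I1125 10:10:58.474109 4769 util.go:30] "
var lineRE = regexp.MustCompile(
	`^(\w{3} +\d+ [\d:]+) (\S+) (\S+)\[(\d+)\]: ` +
		`([IWEF])(\d{4} [\d:.]+) +(\d+) ([\w./-]+:\d+)\] (.*)$`)

func main() {
	// Verbatim entry from this capture.
	line := `Nov 25 10:10:58 crc kubenswrapper[4769]: I1125 10:10:58.474109 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68df85789f-xbxtj"`
	m := lineRE.FindStringSubmatch(line)
	if m == nil {
		fmt.Println("unparsed line")
		return
	}
	// m[1]=journal time, m[2]=host, m[5]=severity, m[8]=source file:line, m[9]=message
	fmt.Printf("journal time=%s host=%s severity=%s source=%s\nmessage: %s\n",
		m[1], m[2], m[5], m[8], m[9])
}

As a sanity check against the "Observed pod startup duration" records earlier in this capture: for kube-state-metrics-0, podStartE2EDuration (2.286015168s) minus the image-pull window (lastFinishedPulling − firstStartedPulling ≈ 0.409s) matches podStartSLOduration (≈1.87696012s), i.e. the SLO figure appears to exclude image-pull time.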
Nov 25 10:10:57 crc kubenswrapper[4769]: I1125 10:10:57.970399 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/891399fa-746b-40b9-b3b4-a3a7afe40aa6-ovsdbserver-nb\") pod \"dnsmasq-dns-68df85789f-xbxtj\" (UID: \"891399fa-746b-40b9-b3b4-a3a7afe40aa6\") " pod="openstack/dnsmasq-dns-68df85789f-xbxtj" Nov 25 10:10:57 crc kubenswrapper[4769]: I1125 10:10:57.973842 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/891399fa-746b-40b9-b3b4-a3a7afe40aa6-ovsdbserver-sb\") pod \"dnsmasq-dns-68df85789f-xbxtj\" (UID: \"891399fa-746b-40b9-b3b4-a3a7afe40aa6\") " pod="openstack/dnsmasq-dns-68df85789f-xbxtj" Nov 25 10:10:57 crc kubenswrapper[4769]: I1125 10:10:57.973920 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/891399fa-746b-40b9-b3b4-a3a7afe40aa6-ovsdbserver-nb\") pod \"dnsmasq-dns-68df85789f-xbxtj\" (UID: \"891399fa-746b-40b9-b3b4-a3a7afe40aa6\") " pod="openstack/dnsmasq-dns-68df85789f-xbxtj" Nov 25 10:10:57 crc kubenswrapper[4769]: I1125 10:10:57.974233 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/891399fa-746b-40b9-b3b4-a3a7afe40aa6-config\") pod \"dnsmasq-dns-68df85789f-xbxtj\" (UID: \"891399fa-746b-40b9-b3b4-a3a7afe40aa6\") " pod="openstack/dnsmasq-dns-68df85789f-xbxtj" Nov 25 10:10:57 crc kubenswrapper[4769]: I1125 10:10:57.974356 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/891399fa-746b-40b9-b3b4-a3a7afe40aa6-openstack-edpm-ipam\") pod \"dnsmasq-dns-68df85789f-xbxtj\" (UID: \"891399fa-746b-40b9-b3b4-a3a7afe40aa6\") " pod="openstack/dnsmasq-dns-68df85789f-xbxtj" Nov 25 10:10:57 crc kubenswrapper[4769]: I1125 10:10:57.974430 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/891399fa-746b-40b9-b3b4-a3a7afe40aa6-dns-swift-storage-0\") pod \"dnsmasq-dns-68df85789f-xbxtj\" (UID: \"891399fa-746b-40b9-b3b4-a3a7afe40aa6\") " pod="openstack/dnsmasq-dns-68df85789f-xbxtj" Nov 25 10:10:57 crc kubenswrapper[4769]: I1125 10:10:57.974475 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/891399fa-746b-40b9-b3b4-a3a7afe40aa6-dns-svc\") pod \"dnsmasq-dns-68df85789f-xbxtj\" (UID: \"891399fa-746b-40b9-b3b4-a3a7afe40aa6\") " pod="openstack/dnsmasq-dns-68df85789f-xbxtj" Nov 25 10:10:58 crc kubenswrapper[4769]: I1125 10:10:58.187732 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b8x4k\" (UniqueName: \"kubernetes.io/projected/891399fa-746b-40b9-b3b4-a3a7afe40aa6-kube-api-access-b8x4k\") pod \"dnsmasq-dns-68df85789f-xbxtj\" (UID: \"891399fa-746b-40b9-b3b4-a3a7afe40aa6\") " pod="openstack/dnsmasq-dns-68df85789f-xbxtj" Nov 25 10:10:58 crc kubenswrapper[4769]: I1125 10:10:58.474109 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68df85789f-xbxtj" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.073145 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.083783 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.107307 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"4857aaf8-4133-4c20-bc8c-d4d195091176\" (UID: \"4857aaf8-4133-4c20-bc8c-d4d195091176\") " Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.107439 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/981369ae-93f2-4c25-bdea-d3d89686b0d5-erlang-cookie-secret\") pod \"981369ae-93f2-4c25-bdea-d3d89686b0d5\" (UID: \"981369ae-93f2-4c25-bdea-d3d89686b0d5\") " Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.107471 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/981369ae-93f2-4c25-bdea-d3d89686b0d5-server-conf\") pod \"981369ae-93f2-4c25-bdea-d3d89686b0d5\" (UID: \"981369ae-93f2-4c25-bdea-d3d89686b0d5\") " Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.107520 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4857aaf8-4133-4c20-bc8c-d4d195091176-plugins-conf\") pod \"4857aaf8-4133-4c20-bc8c-d4d195091176\" (UID: \"4857aaf8-4133-4c20-bc8c-d4d195091176\") " Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.107566 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/981369ae-93f2-4c25-bdea-d3d89686b0d5-plugins-conf\") pod \"981369ae-93f2-4c25-bdea-d3d89686b0d5\" (UID: \"981369ae-93f2-4c25-bdea-d3d89686b0d5\") " Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.107627 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4857aaf8-4133-4c20-bc8c-d4d195091176-rabbitmq-confd\") pod \"4857aaf8-4133-4c20-bc8c-d4d195091176\" (UID: \"4857aaf8-4133-4c20-bc8c-d4d195091176\") " Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.107649 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4857aaf8-4133-4c20-bc8c-d4d195091176-server-conf\") pod \"4857aaf8-4133-4c20-bc8c-d4d195091176\" (UID: \"4857aaf8-4133-4c20-bc8c-d4d195091176\") " Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.107716 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4857aaf8-4133-4c20-bc8c-d4d195091176-config-data\") pod \"4857aaf8-4133-4c20-bc8c-d4d195091176\" (UID: \"4857aaf8-4133-4c20-bc8c-d4d195091176\") " Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.107748 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4857aaf8-4133-4c20-bc8c-d4d195091176-rabbitmq-plugins\") pod \"4857aaf8-4133-4c20-bc8c-d4d195091176\" (UID: \"4857aaf8-4133-4c20-bc8c-d4d195091176\") " Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.107827 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4857aaf8-4133-4c20-bc8c-d4d195091176-rabbitmq-tls\") pod \"4857aaf8-4133-4c20-bc8c-d4d195091176\" (UID: \"4857aaf8-4133-4c20-bc8c-d4d195091176\") 
" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.107854 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4857aaf8-4133-4c20-bc8c-d4d195091176-erlang-cookie-secret\") pod \"4857aaf8-4133-4c20-bc8c-d4d195091176\" (UID: \"4857aaf8-4133-4c20-bc8c-d4d195091176\") " Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.107907 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/981369ae-93f2-4c25-bdea-d3d89686b0d5-pod-info\") pod \"981369ae-93f2-4c25-bdea-d3d89686b0d5\" (UID: \"981369ae-93f2-4c25-bdea-d3d89686b0d5\") " Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.107930 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/981369ae-93f2-4c25-bdea-d3d89686b0d5-rabbitmq-plugins\") pod \"981369ae-93f2-4c25-bdea-d3d89686b0d5\" (UID: \"981369ae-93f2-4c25-bdea-d3d89686b0d5\") " Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.108025 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/981369ae-93f2-4c25-bdea-d3d89686b0d5-config-data\") pod \"981369ae-93f2-4c25-bdea-d3d89686b0d5\" (UID: \"981369ae-93f2-4c25-bdea-d3d89686b0d5\") " Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.108048 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/981369ae-93f2-4c25-bdea-d3d89686b0d5-rabbitmq-confd\") pod \"981369ae-93f2-4c25-bdea-d3d89686b0d5\" (UID: \"981369ae-93f2-4c25-bdea-d3d89686b0d5\") " Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.108081 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z8pjv\" (UniqueName: \"kubernetes.io/projected/981369ae-93f2-4c25-bdea-d3d89686b0d5-kube-api-access-z8pjv\") pod \"981369ae-93f2-4c25-bdea-d3d89686b0d5\" (UID: \"981369ae-93f2-4c25-bdea-d3d89686b0d5\") " Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.108125 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/981369ae-93f2-4c25-bdea-d3d89686b0d5-rabbitmq-tls\") pod \"981369ae-93f2-4c25-bdea-d3d89686b0d5\" (UID: \"981369ae-93f2-4c25-bdea-d3d89686b0d5\") " Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.108164 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4857aaf8-4133-4c20-bc8c-d4d195091176-rabbitmq-erlang-cookie\") pod \"4857aaf8-4133-4c20-bc8c-d4d195091176\" (UID: \"4857aaf8-4133-4c20-bc8c-d4d195091176\") " Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.108202 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"981369ae-93f2-4c25-bdea-d3d89686b0d5\" (UID: \"981369ae-93f2-4c25-bdea-d3d89686b0d5\") " Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.108246 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d8znv\" (UniqueName: \"kubernetes.io/projected/4857aaf8-4133-4c20-bc8c-d4d195091176-kube-api-access-d8znv\") pod \"4857aaf8-4133-4c20-bc8c-d4d195091176\" (UID: \"4857aaf8-4133-4c20-bc8c-d4d195091176\") " Nov 25 
10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.108293 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4857aaf8-4133-4c20-bc8c-d4d195091176-pod-info\") pod \"4857aaf8-4133-4c20-bc8c-d4d195091176\" (UID: \"4857aaf8-4133-4c20-bc8c-d4d195091176\") " Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.108347 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/981369ae-93f2-4c25-bdea-d3d89686b0d5-rabbitmq-erlang-cookie\") pod \"981369ae-93f2-4c25-bdea-d3d89686b0d5\" (UID: \"981369ae-93f2-4c25-bdea-d3d89686b0d5\") " Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.108740 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4857aaf8-4133-4c20-bc8c-d4d195091176-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "4857aaf8-4133-4c20-bc8c-d4d195091176" (UID: "4857aaf8-4133-4c20-bc8c-d4d195091176"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.109252 4769 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4857aaf8-4133-4c20-bc8c-d4d195091176-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.134004 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4857aaf8-4133-4c20-bc8c-d4d195091176-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "4857aaf8-4133-4c20-bc8c-d4d195091176" (UID: "4857aaf8-4133-4c20-bc8c-d4d195091176"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.134115 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/4857aaf8-4133-4c20-bc8c-d4d195091176-pod-info" (OuterVolumeSpecName: "pod-info") pod "4857aaf8-4133-4c20-bc8c-d4d195091176" (UID: "4857aaf8-4133-4c20-bc8c-d4d195091176"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.134285 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "persistence") pod "981369ae-93f2-4c25-bdea-d3d89686b0d5" (UID: "981369ae-93f2-4c25-bdea-d3d89686b0d5"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.134463 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4857aaf8-4133-4c20-bc8c-d4d195091176-kube-api-access-d8znv" (OuterVolumeSpecName: "kube-api-access-d8znv") pod "4857aaf8-4133-4c20-bc8c-d4d195091176" (UID: "4857aaf8-4133-4c20-bc8c-d4d195091176"). InnerVolumeSpecName "kube-api-access-d8znv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.134670 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "persistence") pod "4857aaf8-4133-4c20-bc8c-d4d195091176" (UID: "4857aaf8-4133-4c20-bc8c-d4d195091176"). InnerVolumeSpecName "local-storage01-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.134575 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4857aaf8-4133-4c20-bc8c-d4d195091176-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "4857aaf8-4133-4c20-bc8c-d4d195091176" (UID: "4857aaf8-4133-4c20-bc8c-d4d195091176"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.139847 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4857aaf8-4133-4c20-bc8c-d4d195091176-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "4857aaf8-4133-4c20-bc8c-d4d195091176" (UID: "4857aaf8-4133-4c20-bc8c-d4d195091176"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.164678 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/981369ae-93f2-4c25-bdea-d3d89686b0d5-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "981369ae-93f2-4c25-bdea-d3d89686b0d5" (UID: "981369ae-93f2-4c25-bdea-d3d89686b0d5"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.180006 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/981369ae-93f2-4c25-bdea-d3d89686b0d5-pod-info" (OuterVolumeSpecName: "pod-info") pod "981369ae-93f2-4c25-bdea-d3d89686b0d5" (UID: "981369ae-93f2-4c25-bdea-d3d89686b0d5"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.182580 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/981369ae-93f2-4c25-bdea-d3d89686b0d5-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "981369ae-93f2-4c25-bdea-d3d89686b0d5" (UID: "981369ae-93f2-4c25-bdea-d3d89686b0d5"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.185546 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4857aaf8-4133-4c20-bc8c-d4d195091176-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "4857aaf8-4133-4c20-bc8c-d4d195091176" (UID: "4857aaf8-4133-4c20-bc8c-d4d195091176"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.193731 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/981369ae-93f2-4c25-bdea-d3d89686b0d5-kube-api-access-z8pjv" (OuterVolumeSpecName: "kube-api-access-z8pjv") pod "981369ae-93f2-4c25-bdea-d3d89686b0d5" (UID: "981369ae-93f2-4c25-bdea-d3d89686b0d5"). InnerVolumeSpecName "kube-api-access-z8pjv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.202576 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/981369ae-93f2-4c25-bdea-d3d89686b0d5-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "981369ae-93f2-4c25-bdea-d3d89686b0d5" (UID: "981369ae-93f2-4c25-bdea-d3d89686b0d5"). 
InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.206237 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/981369ae-93f2-4c25-bdea-d3d89686b0d5-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "981369ae-93f2-4c25-bdea-d3d89686b0d5" (UID: "981369ae-93f2-4c25-bdea-d3d89686b0d5"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.218763 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/981369ae-93f2-4c25-bdea-d3d89686b0d5-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "981369ae-93f2-4c25-bdea-d3d89686b0d5" (UID: "981369ae-93f2-4c25-bdea-d3d89686b0d5"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.279541 4769 scope.go:117] "RemoveContainer" containerID="19d7e4719048ead4c6811e0bf09eaeb04f5d9c0b0e2c91bc113c457c1276b7a7" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.279696 4769 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4857aaf8-4133-4c20-bc8c-d4d195091176-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 25 10:10:59 crc kubenswrapper[4769]: E1125 10:10:59.280003 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.280182 4769 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4857aaf8-4133-4c20-bc8c-d4d195091176-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.280205 4769 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4857aaf8-4133-4c20-bc8c-d4d195091176-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.280215 4769 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/981369ae-93f2-4c25-bdea-d3d89686b0d5-pod-info\") on node \"crc\" DevicePath \"\"" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.280225 4769 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/981369ae-93f2-4c25-bdea-d3d89686b0d5-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.280235 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z8pjv\" (UniqueName: \"kubernetes.io/projected/981369ae-93f2-4c25-bdea-d3d89686b0d5-kube-api-access-z8pjv\") on node \"crc\" DevicePath \"\"" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.280253 4769 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/981369ae-93f2-4c25-bdea-d3d89686b0d5-rabbitmq-tls\") on node \"crc\" 
DevicePath \"\"" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.280263 4769 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4857aaf8-4133-4c20-bc8c-d4d195091176-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.280292 4769 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" " Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.280302 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d8znv\" (UniqueName: \"kubernetes.io/projected/4857aaf8-4133-4c20-bc8c-d4d195091176-kube-api-access-d8znv\") on node \"crc\" DevicePath \"\"" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.280313 4769 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4857aaf8-4133-4c20-bc8c-d4d195091176-pod-info\") on node \"crc\" DevicePath \"\"" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.280325 4769 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/981369ae-93f2-4c25-bdea-d3d89686b0d5-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.280339 4769 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.280350 4769 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/981369ae-93f2-4c25-bdea-d3d89686b0d5-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.280359 4769 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/981369ae-93f2-4c25-bdea-d3d89686b0d5-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.354796 4769 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.371454 4769 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.387376 4769 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\"" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.387408 4769 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.389559 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/981369ae-93f2-4c25-bdea-d3d89686b0d5-config-data" (OuterVolumeSpecName: "config-data") pod "981369ae-93f2-4c25-bdea-d3d89686b0d5" (UID: "981369ae-93f2-4c25-bdea-d3d89686b0d5"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.404894 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4857aaf8-4133-4c20-bc8c-d4d195091176-server-conf" (OuterVolumeSpecName: "server-conf") pod "4857aaf8-4133-4c20-bc8c-d4d195091176" (UID: "4857aaf8-4133-4c20-bc8c-d4d195091176"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.445914 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4857aaf8-4133-4c20-bc8c-d4d195091176-config-data" (OuterVolumeSpecName: "config-data") pod "4857aaf8-4133-4c20-bc8c-d4d195091176" (UID: "4857aaf8-4133-4c20-bc8c-d4d195091176"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.457581 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/981369ae-93f2-4c25-bdea-d3d89686b0d5-server-conf" (OuterVolumeSpecName: "server-conf") pod "981369ae-93f2-4c25-bdea-d3d89686b0d5" (UID: "981369ae-93f2-4c25-bdea-d3d89686b0d5"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.489466 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/981369ae-93f2-4c25-bdea-d3d89686b0d5-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.489735 4769 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/981369ae-93f2-4c25-bdea-d3d89686b0d5-server-conf\") on node \"crc\" DevicePath \"\"" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.489798 4769 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4857aaf8-4133-4c20-bc8c-d4d195091176-server-conf\") on node \"crc\" DevicePath \"\"" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.489860 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4857aaf8-4133-4c20-bc8c-d4d195091176-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.663182 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/981369ae-93f2-4c25-bdea-d3d89686b0d5-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "981369ae-93f2-4c25-bdea-d3d89686b0d5" (UID: "981369ae-93f2-4c25-bdea-d3d89686b0d5"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.670571 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4857aaf8-4133-4c20-bc8c-d4d195091176-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "4857aaf8-4133-4c20-bc8c-d4d195091176" (UID: "4857aaf8-4133-4c20-bc8c-d4d195091176"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.700636 4769 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4857aaf8-4133-4c20-bc8c-d4d195091176-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.700666 4769 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/981369ae-93f2-4c25-bdea-d3d89686b0d5-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.808496 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="981369ae-93f2-4c25-bdea-d3d89686b0d5" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.132:5671: i/o timeout" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.934738 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"4857aaf8-4133-4c20-bc8c-d4d195091176","Type":"ContainerDied","Data":"f40e71ab65d74e5d0367494f2a420b267a77505c2df1aa23eeef6dabf10272d8"} Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.934802 4769 scope.go:117] "RemoveContainer" containerID="1d15ed991d65b7745c50096bb73a804672d4002840b23053fddc04ecdab15ac0" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.934993 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.940983 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"981369ae-93f2-4c25-bdea-d3d89686b0d5","Type":"ContainerDied","Data":"91e89b078c5be26a12c4bd313bea66caccf2d4a847c3cf136dc471af97493c30"} Nov 25 10:10:59 crc kubenswrapper[4769]: I1125 10:10:59.941126 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.006872 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.026300 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.040195 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.055448 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.069050 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 10:11:00 crc kubenswrapper[4769]: E1125 10:11:00.069949 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4857aaf8-4133-4c20-bc8c-d4d195091176" containerName="rabbitmq" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.069996 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="4857aaf8-4133-4c20-bc8c-d4d195091176" containerName="rabbitmq" Nov 25 10:11:00 crc kubenswrapper[4769]: E1125 10:11:00.070024 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="981369ae-93f2-4c25-bdea-d3d89686b0d5" containerName="setup-container" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.070034 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="981369ae-93f2-4c25-bdea-d3d89686b0d5" containerName="setup-container" Nov 25 10:11:00 crc kubenswrapper[4769]: E1125 10:11:00.070079 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4857aaf8-4133-4c20-bc8c-d4d195091176" containerName="setup-container" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.070089 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="4857aaf8-4133-4c20-bc8c-d4d195091176" containerName="setup-container" Nov 25 10:11:00 crc kubenswrapper[4769]: E1125 10:11:00.070122 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="981369ae-93f2-4c25-bdea-d3d89686b0d5" containerName="rabbitmq" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.070131 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="981369ae-93f2-4c25-bdea-d3d89686b0d5" containerName="rabbitmq" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.070448 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="4857aaf8-4133-4c20-bc8c-d4d195091176" containerName="rabbitmq" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.070489 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="981369ae-93f2-4c25-bdea-d3d89686b0d5" containerName="rabbitmq" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.072501 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.080223 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.082682 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.093495 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-zjtmv" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.093708 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.093943 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.094868 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.095080 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.095103 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.095429 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.095436 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.095621 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.095652 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.095753 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.095812 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.096542 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-9tnjw" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.097042 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.097288 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.107197 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.110139 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="4857aaf8-4133-4c20-bc8c-d4d195091176" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.133:5671: i/o timeout" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.213111 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"8b992919-0b8a-425d-9e1a-aec914a91965\") " pod="openstack/rabbitmq-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.213163 4769 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/8b992919-0b8a-425d-9e1a-aec914a91965-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"8b992919-0b8a-425d-9e1a-aec914a91965\") " pod="openstack/rabbitmq-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.213213 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/df64d497-1d94-46ba-b773-da7ade77177a-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"df64d497-1d94-46ba-b773-da7ade77177a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.213246 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/df64d497-1d94-46ba-b773-da7ade77177a-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"df64d497-1d94-46ba-b773-da7ade77177a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.213269 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/df64d497-1d94-46ba-b773-da7ade77177a-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"df64d497-1d94-46ba-b773-da7ade77177a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.213300 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d2ffc\" (UniqueName: \"kubernetes.io/projected/df64d497-1d94-46ba-b773-da7ade77177a-kube-api-access-d2ffc\") pod \"rabbitmq-cell1-server-0\" (UID: \"df64d497-1d94-46ba-b773-da7ade77177a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.213316 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/8b992919-0b8a-425d-9e1a-aec914a91965-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"8b992919-0b8a-425d-9e1a-aec914a91965\") " pod="openstack/rabbitmq-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.213343 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8b992919-0b8a-425d-9e1a-aec914a91965-config-data\") pod \"rabbitmq-server-0\" (UID: \"8b992919-0b8a-425d-9e1a-aec914a91965\") " pod="openstack/rabbitmq-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.213362 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"df64d497-1d94-46ba-b773-da7ade77177a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.213397 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/8b992919-0b8a-425d-9e1a-aec914a91965-server-conf\") pod \"rabbitmq-server-0\" (UID: \"8b992919-0b8a-425d-9e1a-aec914a91965\") " pod="openstack/rabbitmq-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.213416 4769 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/df64d497-1d94-46ba-b773-da7ade77177a-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"df64d497-1d94-46ba-b773-da7ade77177a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.213433 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/df64d497-1d94-46ba-b773-da7ade77177a-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"df64d497-1d94-46ba-b773-da7ade77177a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.213505 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/df64d497-1d94-46ba-b773-da7ade77177a-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"df64d497-1d94-46ba-b773-da7ade77177a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.213524 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/df64d497-1d94-46ba-b773-da7ade77177a-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"df64d497-1d94-46ba-b773-da7ade77177a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.213552 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/8b992919-0b8a-425d-9e1a-aec914a91965-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"8b992919-0b8a-425d-9e1a-aec914a91965\") " pod="openstack/rabbitmq-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.213581 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/8b992919-0b8a-425d-9e1a-aec914a91965-pod-info\") pod \"rabbitmq-server-0\" (UID: \"8b992919-0b8a-425d-9e1a-aec914a91965\") " pod="openstack/rabbitmq-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.213599 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/df64d497-1d94-46ba-b773-da7ade77177a-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"df64d497-1d94-46ba-b773-da7ade77177a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.213616 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/df64d497-1d94-46ba-b773-da7ade77177a-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"df64d497-1d94-46ba-b773-da7ade77177a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.213635 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/8b992919-0b8a-425d-9e1a-aec914a91965-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"8b992919-0b8a-425d-9e1a-aec914a91965\") " pod="openstack/rabbitmq-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.213650 4769 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/8b992919-0b8a-425d-9e1a-aec914a91965-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"8b992919-0b8a-425d-9e1a-aec914a91965\") " pod="openstack/rabbitmq-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.213670 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/8b992919-0b8a-425d-9e1a-aec914a91965-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"8b992919-0b8a-425d-9e1a-aec914a91965\") " pod="openstack/rabbitmq-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.213684 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hch96\" (UniqueName: \"kubernetes.io/projected/8b992919-0b8a-425d-9e1a-aec914a91965-kube-api-access-hch96\") pod \"rabbitmq-server-0\" (UID: \"8b992919-0b8a-425d-9e1a-aec914a91965\") " pod="openstack/rabbitmq-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.255733 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4857aaf8-4133-4c20-bc8c-d4d195091176" path="/var/lib/kubelet/pods/4857aaf8-4133-4c20-bc8c-d4d195091176/volumes" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.259114 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="981369ae-93f2-4c25-bdea-d3d89686b0d5" path="/var/lib/kubelet/pods/981369ae-93f2-4c25-bdea-d3d89686b0d5/volumes" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.317436 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/df64d497-1d94-46ba-b773-da7ade77177a-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"df64d497-1d94-46ba-b773-da7ade77177a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.317703 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/df64d497-1d94-46ba-b773-da7ade77177a-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"df64d497-1d94-46ba-b773-da7ade77177a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.317857 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/8b992919-0b8a-425d-9e1a-aec914a91965-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"8b992919-0b8a-425d-9e1a-aec914a91965\") " pod="openstack/rabbitmq-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.317997 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/8b992919-0b8a-425d-9e1a-aec914a91965-pod-info\") pod \"rabbitmq-server-0\" (UID: \"8b992919-0b8a-425d-9e1a-aec914a91965\") " pod="openstack/rabbitmq-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.318099 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/df64d497-1d94-46ba-b773-da7ade77177a-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"df64d497-1d94-46ba-b773-da7ade77177a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.318166 4769 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/df64d497-1d94-46ba-b773-da7ade77177a-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"df64d497-1d94-46ba-b773-da7ade77177a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.318203 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/df64d497-1d94-46ba-b773-da7ade77177a-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"df64d497-1d94-46ba-b773-da7ade77177a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.318307 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/8b992919-0b8a-425d-9e1a-aec914a91965-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"8b992919-0b8a-425d-9e1a-aec914a91965\") " pod="openstack/rabbitmq-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.318345 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/8b992919-0b8a-425d-9e1a-aec914a91965-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"8b992919-0b8a-425d-9e1a-aec914a91965\") " pod="openstack/rabbitmq-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.318383 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/8b992919-0b8a-425d-9e1a-aec914a91965-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"8b992919-0b8a-425d-9e1a-aec914a91965\") " pod="openstack/rabbitmq-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.318400 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hch96\" (UniqueName: \"kubernetes.io/projected/8b992919-0b8a-425d-9e1a-aec914a91965-kube-api-access-hch96\") pod \"rabbitmq-server-0\" (UID: \"8b992919-0b8a-425d-9e1a-aec914a91965\") " pod="openstack/rabbitmq-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.318498 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"8b992919-0b8a-425d-9e1a-aec914a91965\") " pod="openstack/rabbitmq-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.318570 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/8b992919-0b8a-425d-9e1a-aec914a91965-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"8b992919-0b8a-425d-9e1a-aec914a91965\") " pod="openstack/rabbitmq-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.318702 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/df64d497-1d94-46ba-b773-da7ade77177a-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"df64d497-1d94-46ba-b773-da7ade77177a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.318778 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/df64d497-1d94-46ba-b773-da7ade77177a-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"df64d497-1d94-46ba-b773-da7ade77177a\") " 
pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.318838 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/df64d497-1d94-46ba-b773-da7ade77177a-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"df64d497-1d94-46ba-b773-da7ade77177a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.318911 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d2ffc\" (UniqueName: \"kubernetes.io/projected/df64d497-1d94-46ba-b773-da7ade77177a-kube-api-access-d2ffc\") pod \"rabbitmq-cell1-server-0\" (UID: \"df64d497-1d94-46ba-b773-da7ade77177a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.318938 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/8b992919-0b8a-425d-9e1a-aec914a91965-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"8b992919-0b8a-425d-9e1a-aec914a91965\") " pod="openstack/rabbitmq-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.319014 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8b992919-0b8a-425d-9e1a-aec914a91965-config-data\") pod \"rabbitmq-server-0\" (UID: \"8b992919-0b8a-425d-9e1a-aec914a91965\") " pod="openstack/rabbitmq-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.319035 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"df64d497-1d94-46ba-b773-da7ade77177a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.319064 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/8b992919-0b8a-425d-9e1a-aec914a91965-server-conf\") pod \"rabbitmq-server-0\" (UID: \"8b992919-0b8a-425d-9e1a-aec914a91965\") " pod="openstack/rabbitmq-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.319086 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/df64d497-1d94-46ba-b773-da7ade77177a-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"df64d497-1d94-46ba-b773-da7ade77177a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.319100 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/df64d497-1d94-46ba-b773-da7ade77177a-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"df64d497-1d94-46ba-b773-da7ade77177a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.319627 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/df64d497-1d94-46ba-b773-da7ade77177a-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"df64d497-1d94-46ba-b773-da7ade77177a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.319908 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/8b992919-0b8a-425d-9e1a-aec914a91965-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"8b992919-0b8a-425d-9e1a-aec914a91965\") " pod="openstack/rabbitmq-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.320092 4769 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"df64d497-1d94-46ba-b773-da7ade77177a\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.320362 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/df64d497-1d94-46ba-b773-da7ade77177a-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"df64d497-1d94-46ba-b773-da7ade77177a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.320492 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8b992919-0b8a-425d-9e1a-aec914a91965-config-data\") pod \"rabbitmq-server-0\" (UID: \"8b992919-0b8a-425d-9e1a-aec914a91965\") " pod="openstack/rabbitmq-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.321374 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/df64d497-1d94-46ba-b773-da7ade77177a-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"df64d497-1d94-46ba-b773-da7ade77177a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.321572 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/8b992919-0b8a-425d-9e1a-aec914a91965-server-conf\") pod \"rabbitmq-server-0\" (UID: \"8b992919-0b8a-425d-9e1a-aec914a91965\") " pod="openstack/rabbitmq-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.321729 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/8b992919-0b8a-425d-9e1a-aec914a91965-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"8b992919-0b8a-425d-9e1a-aec914a91965\") " pod="openstack/rabbitmq-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.322435 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/8b992919-0b8a-425d-9e1a-aec914a91965-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"8b992919-0b8a-425d-9e1a-aec914a91965\") " pod="openstack/rabbitmq-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.322523 4769 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"8b992919-0b8a-425d-9e1a-aec914a91965\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/rabbitmq-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.322632 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/df64d497-1d94-46ba-b773-da7ade77177a-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"df64d497-1d94-46ba-b773-da7ade77177a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:11:00 
crc kubenswrapper[4769]: I1125 10:11:00.323279 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/8b992919-0b8a-425d-9e1a-aec914a91965-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"8b992919-0b8a-425d-9e1a-aec914a91965\") " pod="openstack/rabbitmq-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.323716 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/df64d497-1d94-46ba-b773-da7ade77177a-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"df64d497-1d94-46ba-b773-da7ade77177a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.324287 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/8b992919-0b8a-425d-9e1a-aec914a91965-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"8b992919-0b8a-425d-9e1a-aec914a91965\") " pod="openstack/rabbitmq-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.326814 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/df64d497-1d94-46ba-b773-da7ade77177a-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"df64d497-1d94-46ba-b773-da7ade77177a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.327891 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/df64d497-1d94-46ba-b773-da7ade77177a-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"df64d497-1d94-46ba-b773-da7ade77177a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.329563 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/8b992919-0b8a-425d-9e1a-aec914a91965-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"8b992919-0b8a-425d-9e1a-aec914a91965\") " pod="openstack/rabbitmq-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.330977 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/8b992919-0b8a-425d-9e1a-aec914a91965-pod-info\") pod \"rabbitmq-server-0\" (UID: \"8b992919-0b8a-425d-9e1a-aec914a91965\") " pod="openstack/rabbitmq-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.331528 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/df64d497-1d94-46ba-b773-da7ade77177a-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"df64d497-1d94-46ba-b773-da7ade77177a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.338315 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hch96\" (UniqueName: \"kubernetes.io/projected/8b992919-0b8a-425d-9e1a-aec914a91965-kube-api-access-hch96\") pod \"rabbitmq-server-0\" (UID: \"8b992919-0b8a-425d-9e1a-aec914a91965\") " pod="openstack/rabbitmq-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.338579 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d2ffc\" (UniqueName: \"kubernetes.io/projected/df64d497-1d94-46ba-b773-da7ade77177a-kube-api-access-d2ffc\") pod 
\"rabbitmq-cell1-server-0\" (UID: \"df64d497-1d94-46ba-b773-da7ade77177a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.378698 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"df64d497-1d94-46ba-b773-da7ade77177a\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.379785 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"8b992919-0b8a-425d-9e1a-aec914a91965\") " pod="openstack/rabbitmq-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.409123 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.436448 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:11:00 crc kubenswrapper[4769]: E1125 10:11:00.582763 4769 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Nov 25 10:11:00 crc kubenswrapper[4769]: E1125 10:11:00.582833 4769 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Nov 25 10:11:00 crc kubenswrapper[4769]: E1125 10:11:00.582988 4769 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-v8zpk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL 
MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-gvtk6_openstack(495df49b-2ea1-4ee3-8d6d-5607d639d308): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 10:11:00 crc kubenswrapper[4769]: E1125 10:11:00.584195 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/heat-db-sync-gvtk6" podUID="495df49b-2ea1-4ee3-8d6d-5607d639d308" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.602724 4769 scope.go:117] "RemoveContainer" containerID="2a413543e21b9469c261c3a61cde7924060f4282a41e7a3c3da17759fd57caf4" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.685247 4769 scope.go:117] "RemoveContainer" containerID="e89816873ee41f43364f9d484d1ce5af7f6013fdc49031678da977a414617fba" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.851535 4769 scope.go:117] "RemoveContainer" containerID="e81a4a46bd061521e1409da211c03d0f8532e14a5cf15cf52d40cc95b4cda39d" Nov 25 10:11:00 crc kubenswrapper[4769]: I1125 10:11:00.865832 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="16b8c066-592e-4ec8-99e9-aca97ded614a" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 25 10:11:00 crc kubenswrapper[4769]: E1125 10:11:00.994659 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-gvtk6" podUID="495df49b-2ea1-4ee3-8d6d-5607d639d308" Nov 25 10:11:01 crc kubenswrapper[4769]: I1125 10:11:01.132866 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-68df85789f-xbxtj"] Nov 25 10:11:01 crc kubenswrapper[4769]: I1125 10:11:01.148425 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 10:11:01 crc kubenswrapper[4769]: I1125 10:11:01.316429 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 10:11:01 crc kubenswrapper[4769]: I1125 10:11:01.995647 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"df64d497-1d94-46ba-b773-da7ade77177a","Type":"ContainerStarted","Data":"84a58b59663780c6fecd93b6a9b9cf973a046c18ef6335dd8dc7151895e3e224"} Nov 25 10:11:02 crc kubenswrapper[4769]: I1125 10:11:02.000018 4769 generic.go:334] "Generic (PLEG): container finished" podID="891399fa-746b-40b9-b3b4-a3a7afe40aa6" containerID="5011b0e238564e0c7ca64a5eccf1e84f075bc675b3f22da3391a62f945070c2e" exitCode=0 Nov 25 10:11:02 crc kubenswrapper[4769]: I1125 10:11:02.000077 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68df85789f-xbxtj" event={"ID":"891399fa-746b-40b9-b3b4-a3a7afe40aa6","Type":"ContainerDied","Data":"5011b0e238564e0c7ca64a5eccf1e84f075bc675b3f22da3391a62f945070c2e"} Nov 
25 10:11:02 crc kubenswrapper[4769]: I1125 10:11:02.000122 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68df85789f-xbxtj" event={"ID":"891399fa-746b-40b9-b3b4-a3a7afe40aa6","Type":"ContainerStarted","Data":"202bbae5b53199b27a02eabcf8ad8e073aad0f32ad56a243f223dd096a49a10a"} Nov 25 10:11:02 crc kubenswrapper[4769]: I1125 10:11:02.006112 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"8b992919-0b8a-425d-9e1a-aec914a91965","Type":"ContainerStarted","Data":"59315e84b612d61ba38c6babb57f10ff2b5ea35ddfbce44db343f6825d6821ad"} Nov 25 10:11:03 crc kubenswrapper[4769]: I1125 10:11:03.018766 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68df85789f-xbxtj" event={"ID":"891399fa-746b-40b9-b3b4-a3a7afe40aa6","Type":"ContainerStarted","Data":"fffaba9f675aebcd6ff1d6558b13d6c32f9594d6224197d95fbeadf6339e19ce"} Nov 25 10:11:03 crc kubenswrapper[4769]: I1125 10:11:03.019193 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-68df85789f-xbxtj" Nov 25 10:11:03 crc kubenswrapper[4769]: I1125 10:11:03.058570 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-68df85789f-xbxtj" podStartSLOduration=6.058538355 podStartE2EDuration="6.058538355s" podCreationTimestamp="2025-11-25 10:10:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:11:03.042858583 +0000 UTC m=+1611.627830896" watchObservedRunningTime="2025-11-25 10:11:03.058538355 +0000 UTC m=+1611.643510668" Nov 25 10:11:04 crc kubenswrapper[4769]: I1125 10:11:04.038873 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"8b992919-0b8a-425d-9e1a-aec914a91965","Type":"ContainerStarted","Data":"028ea56a557e4c6820713b12429ecce3af15b11011b89e23f39469b23bdde59b"} Nov 25 10:11:04 crc kubenswrapper[4769]: I1125 10:11:04.045135 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"df64d497-1d94-46ba-b773-da7ade77177a","Type":"ContainerStarted","Data":"cbdd09ffb313dd7711e0e4b55abe9166f501f955a45c64c66acf7679e8500d65"} Nov 25 10:11:08 crc kubenswrapper[4769]: I1125 10:11:08.476207 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-68df85789f-xbxtj" Nov 25 10:11:08 crc kubenswrapper[4769]: I1125 10:11:08.561689 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-bt68p"] Nov 25 10:11:08 crc kubenswrapper[4769]: I1125 10:11:08.562363 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-79b5d74c8c-bt68p" podUID="d52e129a-837d-4574-aa08-69857eb9109f" containerName="dnsmasq-dns" containerID="cri-o://3d78941dc9dee668e4013ce2a77c316e616f77de5def1c04d01d33d39d7290ab" gracePeriod=10 Nov 25 10:11:08 crc kubenswrapper[4769]: I1125 10:11:08.787765 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-bb85b8995-p89c2"] Nov 25 10:11:08 crc kubenswrapper[4769]: I1125 10:11:08.802917 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-bb85b8995-p89c2" Nov 25 10:11:08 crc kubenswrapper[4769]: I1125 10:11:08.820420 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bb85b8995-p89c2"] Nov 25 10:11:08 crc kubenswrapper[4769]: I1125 10:11:08.896697 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0-dns-svc\") pod \"dnsmasq-dns-bb85b8995-p89c2\" (UID: \"c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0\") " pod="openstack/dnsmasq-dns-bb85b8995-p89c2" Nov 25 10:11:08 crc kubenswrapper[4769]: I1125 10:11:08.896741 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0-ovsdbserver-sb\") pod \"dnsmasq-dns-bb85b8995-p89c2\" (UID: \"c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0\") " pod="openstack/dnsmasq-dns-bb85b8995-p89c2" Nov 25 10:11:08 crc kubenswrapper[4769]: I1125 10:11:08.896793 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0-ovsdbserver-nb\") pod \"dnsmasq-dns-bb85b8995-p89c2\" (UID: \"c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0\") " pod="openstack/dnsmasq-dns-bb85b8995-p89c2" Nov 25 10:11:08 crc kubenswrapper[4769]: I1125 10:11:08.896835 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0-openstack-edpm-ipam\") pod \"dnsmasq-dns-bb85b8995-p89c2\" (UID: \"c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0\") " pod="openstack/dnsmasq-dns-bb85b8995-p89c2" Nov 25 10:11:08 crc kubenswrapper[4769]: I1125 10:11:08.896862 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0-dns-swift-storage-0\") pod \"dnsmasq-dns-bb85b8995-p89c2\" (UID: \"c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0\") " pod="openstack/dnsmasq-dns-bb85b8995-p89c2" Nov 25 10:11:08 crc kubenswrapper[4769]: I1125 10:11:08.896895 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0-config\") pod \"dnsmasq-dns-bb85b8995-p89c2\" (UID: \"c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0\") " pod="openstack/dnsmasq-dns-bb85b8995-p89c2" Nov 25 10:11:08 crc kubenswrapper[4769]: I1125 10:11:08.896978 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kn6mq\" (UniqueName: \"kubernetes.io/projected/c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0-kube-api-access-kn6mq\") pod \"dnsmasq-dns-bb85b8995-p89c2\" (UID: \"c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0\") " pod="openstack/dnsmasq-dns-bb85b8995-p89c2" Nov 25 10:11:08 crc kubenswrapper[4769]: I1125 10:11:08.999636 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0-config\") pod \"dnsmasq-dns-bb85b8995-p89c2\" (UID: \"c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0\") " pod="openstack/dnsmasq-dns-bb85b8995-p89c2" Nov 25 10:11:08 crc kubenswrapper[4769]: I1125 10:11:08.999754 4769 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-kn6mq\" (UniqueName: \"kubernetes.io/projected/c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0-kube-api-access-kn6mq\") pod \"dnsmasq-dns-bb85b8995-p89c2\" (UID: \"c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0\") " pod="openstack/dnsmasq-dns-bb85b8995-p89c2" Nov 25 10:11:08 crc kubenswrapper[4769]: I1125 10:11:08.999850 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0-dns-svc\") pod \"dnsmasq-dns-bb85b8995-p89c2\" (UID: \"c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0\") " pod="openstack/dnsmasq-dns-bb85b8995-p89c2" Nov 25 10:11:08 crc kubenswrapper[4769]: I1125 10:11:08.999867 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0-ovsdbserver-sb\") pod \"dnsmasq-dns-bb85b8995-p89c2\" (UID: \"c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0\") " pod="openstack/dnsmasq-dns-bb85b8995-p89c2" Nov 25 10:11:08 crc kubenswrapper[4769]: I1125 10:11:08.999904 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0-ovsdbserver-nb\") pod \"dnsmasq-dns-bb85b8995-p89c2\" (UID: \"c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0\") " pod="openstack/dnsmasq-dns-bb85b8995-p89c2" Nov 25 10:11:08 crc kubenswrapper[4769]: I1125 10:11:08.999953 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0-openstack-edpm-ipam\") pod \"dnsmasq-dns-bb85b8995-p89c2\" (UID: \"c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0\") " pod="openstack/dnsmasq-dns-bb85b8995-p89c2" Nov 25 10:11:09 crc kubenswrapper[4769]: I1125 10:11:09.000021 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0-dns-swift-storage-0\") pod \"dnsmasq-dns-bb85b8995-p89c2\" (UID: \"c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0\") " pod="openstack/dnsmasq-dns-bb85b8995-p89c2" Nov 25 10:11:09 crc kubenswrapper[4769]: I1125 10:11:09.000981 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0-dns-swift-storage-0\") pod \"dnsmasq-dns-bb85b8995-p89c2\" (UID: \"c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0\") " pod="openstack/dnsmasq-dns-bb85b8995-p89c2" Nov 25 10:11:09 crc kubenswrapper[4769]: I1125 10:11:09.001212 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0-ovsdbserver-nb\") pod \"dnsmasq-dns-bb85b8995-p89c2\" (UID: \"c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0\") " pod="openstack/dnsmasq-dns-bb85b8995-p89c2" Nov 25 10:11:09 crc kubenswrapper[4769]: I1125 10:11:09.001538 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0-config\") pod \"dnsmasq-dns-bb85b8995-p89c2\" (UID: \"c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0\") " pod="openstack/dnsmasq-dns-bb85b8995-p89c2" Nov 25 10:11:09 crc kubenswrapper[4769]: I1125 10:11:09.001616 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0-ovsdbserver-sb\") pod \"dnsmasq-dns-bb85b8995-p89c2\" (UID: \"c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0\") " pod="openstack/dnsmasq-dns-bb85b8995-p89c2" Nov 25 10:11:09 crc kubenswrapper[4769]: I1125 10:11:09.001843 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0-dns-svc\") pod \"dnsmasq-dns-bb85b8995-p89c2\" (UID: \"c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0\") " pod="openstack/dnsmasq-dns-bb85b8995-p89c2" Nov 25 10:11:09 crc kubenswrapper[4769]: I1125 10:11:09.001937 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0-openstack-edpm-ipam\") pod \"dnsmasq-dns-bb85b8995-p89c2\" (UID: \"c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0\") " pod="openstack/dnsmasq-dns-bb85b8995-p89c2" Nov 25 10:11:09 crc kubenswrapper[4769]: I1125 10:11:09.036309 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kn6mq\" (UniqueName: \"kubernetes.io/projected/c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0-kube-api-access-kn6mq\") pod \"dnsmasq-dns-bb85b8995-p89c2\" (UID: \"c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0\") " pod="openstack/dnsmasq-dns-bb85b8995-p89c2" Nov 25 10:11:09 crc kubenswrapper[4769]: I1125 10:11:09.131940 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bb85b8995-p89c2" Nov 25 10:11:09 crc kubenswrapper[4769]: I1125 10:11:09.163184 4769 generic.go:334] "Generic (PLEG): container finished" podID="d52e129a-837d-4574-aa08-69857eb9109f" containerID="3d78941dc9dee668e4013ce2a77c316e616f77de5def1c04d01d33d39d7290ab" exitCode=0 Nov 25 10:11:09 crc kubenswrapper[4769]: I1125 10:11:09.163239 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b5d74c8c-bt68p" event={"ID":"d52e129a-837d-4574-aa08-69857eb9109f","Type":"ContainerDied","Data":"3d78941dc9dee668e4013ce2a77c316e616f77de5def1c04d01d33d39d7290ab"} Nov 25 10:11:09 crc kubenswrapper[4769]: I1125 10:11:09.399361 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-79b5d74c8c-bt68p" Nov 25 10:11:09 crc kubenswrapper[4769]: I1125 10:11:09.519023 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7x4lv\" (UniqueName: \"kubernetes.io/projected/d52e129a-837d-4574-aa08-69857eb9109f-kube-api-access-7x4lv\") pod \"d52e129a-837d-4574-aa08-69857eb9109f\" (UID: \"d52e129a-837d-4574-aa08-69857eb9109f\") " Nov 25 10:11:09 crc kubenswrapper[4769]: I1125 10:11:09.519089 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d52e129a-837d-4574-aa08-69857eb9109f-ovsdbserver-sb\") pod \"d52e129a-837d-4574-aa08-69857eb9109f\" (UID: \"d52e129a-837d-4574-aa08-69857eb9109f\") " Nov 25 10:11:09 crc kubenswrapper[4769]: I1125 10:11:09.519287 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d52e129a-837d-4574-aa08-69857eb9109f-config\") pod \"d52e129a-837d-4574-aa08-69857eb9109f\" (UID: \"d52e129a-837d-4574-aa08-69857eb9109f\") " Nov 25 10:11:09 crc kubenswrapper[4769]: I1125 10:11:09.519303 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d52e129a-837d-4574-aa08-69857eb9109f-ovsdbserver-nb\") pod \"d52e129a-837d-4574-aa08-69857eb9109f\" (UID: \"d52e129a-837d-4574-aa08-69857eb9109f\") " Nov 25 10:11:09 crc kubenswrapper[4769]: I1125 10:11:09.519329 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d52e129a-837d-4574-aa08-69857eb9109f-dns-swift-storage-0\") pod \"d52e129a-837d-4574-aa08-69857eb9109f\" (UID: \"d52e129a-837d-4574-aa08-69857eb9109f\") " Nov 25 10:11:09 crc kubenswrapper[4769]: I1125 10:11:09.519367 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d52e129a-837d-4574-aa08-69857eb9109f-dns-svc\") pod \"d52e129a-837d-4574-aa08-69857eb9109f\" (UID: \"d52e129a-837d-4574-aa08-69857eb9109f\") " Nov 25 10:11:09 crc kubenswrapper[4769]: I1125 10:11:09.529170 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d52e129a-837d-4574-aa08-69857eb9109f-kube-api-access-7x4lv" (OuterVolumeSpecName: "kube-api-access-7x4lv") pod "d52e129a-837d-4574-aa08-69857eb9109f" (UID: "d52e129a-837d-4574-aa08-69857eb9109f"). InnerVolumeSpecName "kube-api-access-7x4lv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:11:09 crc kubenswrapper[4769]: I1125 10:11:09.597595 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d52e129a-837d-4574-aa08-69857eb9109f-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "d52e129a-837d-4574-aa08-69857eb9109f" (UID: "d52e129a-837d-4574-aa08-69857eb9109f"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:11:09 crc kubenswrapper[4769]: I1125 10:11:09.597760 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d52e129a-837d-4574-aa08-69857eb9109f-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "d52e129a-837d-4574-aa08-69857eb9109f" (UID: "d52e129a-837d-4574-aa08-69857eb9109f"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:11:09 crc kubenswrapper[4769]: I1125 10:11:09.598896 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d52e129a-837d-4574-aa08-69857eb9109f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d52e129a-837d-4574-aa08-69857eb9109f" (UID: "d52e129a-837d-4574-aa08-69857eb9109f"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:11:09 crc kubenswrapper[4769]: I1125 10:11:09.602528 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d52e129a-837d-4574-aa08-69857eb9109f-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "d52e129a-837d-4574-aa08-69857eb9109f" (UID: "d52e129a-837d-4574-aa08-69857eb9109f"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:11:09 crc kubenswrapper[4769]: I1125 10:11:09.623297 4769 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d52e129a-837d-4574-aa08-69857eb9109f-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:09 crc kubenswrapper[4769]: I1125 10:11:09.623353 4769 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d52e129a-837d-4574-aa08-69857eb9109f-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:09 crc kubenswrapper[4769]: I1125 10:11:09.623363 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7x4lv\" (UniqueName: \"kubernetes.io/projected/d52e129a-837d-4574-aa08-69857eb9109f-kube-api-access-7x4lv\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:09 crc kubenswrapper[4769]: I1125 10:11:09.623374 4769 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d52e129a-837d-4574-aa08-69857eb9109f-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:09 crc kubenswrapper[4769]: I1125 10:11:09.623384 4769 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d52e129a-837d-4574-aa08-69857eb9109f-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:09 crc kubenswrapper[4769]: I1125 10:11:09.629883 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d52e129a-837d-4574-aa08-69857eb9109f-config" (OuterVolumeSpecName: "config") pod "d52e129a-837d-4574-aa08-69857eb9109f" (UID: "d52e129a-837d-4574-aa08-69857eb9109f"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:11:09 crc kubenswrapper[4769]: I1125 10:11:09.727022 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d52e129a-837d-4574-aa08-69857eb9109f-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:09 crc kubenswrapper[4769]: I1125 10:11:09.809176 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bb85b8995-p89c2"] Nov 25 10:11:10 crc kubenswrapper[4769]: I1125 10:11:10.177198 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bb85b8995-p89c2" event={"ID":"c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0","Type":"ContainerStarted","Data":"9b78d093769e6609fb1377bfaccac0ba895f1b2bdd03f89ff73640a08d151b0e"} Nov 25 10:11:10 crc kubenswrapper[4769]: I1125 10:11:10.181709 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79b5d74c8c-bt68p" event={"ID":"d52e129a-837d-4574-aa08-69857eb9109f","Type":"ContainerDied","Data":"163897805c6c3c0b952a92162e13cb26b790b874731cc3be233f5c9de1507090"} Nov 25 10:11:10 crc kubenswrapper[4769]: I1125 10:11:10.181920 4769 scope.go:117] "RemoveContainer" containerID="3d78941dc9dee668e4013ce2a77c316e616f77de5def1c04d01d33d39d7290ab" Nov 25 10:11:10 crc kubenswrapper[4769]: I1125 10:11:10.182450 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79b5d74c8c-bt68p" Nov 25 10:11:10 crc kubenswrapper[4769]: I1125 10:11:10.228832 4769 scope.go:117] "RemoveContainer" containerID="6e129b0ee0b673a6111cbe4d8de6d7f20c479802392271c2dd26afc6eb73ad49" Nov 25 10:11:10 crc kubenswrapper[4769]: I1125 10:11:10.239797 4769 scope.go:117] "RemoveContainer" containerID="19d7e4719048ead4c6811e0bf09eaeb04f5d9c0b0e2c91bc113c457c1276b7a7" Nov 25 10:11:10 crc kubenswrapper[4769]: E1125 10:11:10.240217 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:11:10 crc kubenswrapper[4769]: I1125 10:11:10.298854 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-bt68p"] Nov 25 10:11:10 crc kubenswrapper[4769]: I1125 10:11:10.311522 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-79b5d74c8c-bt68p"] Nov 25 10:11:11 crc kubenswrapper[4769]: I1125 10:11:11.204270 4769 generic.go:334] "Generic (PLEG): container finished" podID="c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0" containerID="15d8972ea67dccd547743b946a58d0d00538bccbc4dab29862c1826edca7bcb9" exitCode=0 Nov 25 10:11:11 crc kubenswrapper[4769]: I1125 10:11:11.204728 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bb85b8995-p89c2" event={"ID":"c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0","Type":"ContainerDied","Data":"15d8972ea67dccd547743b946a58d0d00538bccbc4dab29862c1826edca7bcb9"} Nov 25 10:11:12 crc kubenswrapper[4769]: I1125 10:11:12.223243 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bb85b8995-p89c2" event={"ID":"c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0","Type":"ContainerStarted","Data":"a947eceb4696348fb833f39166970a09b712c19492d57a1afda49be4a22dc08d"} Nov 25 10:11:12 crc 
kubenswrapper[4769]: I1125 10:11:12.223537 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-bb85b8995-p89c2" Nov 25 10:11:12 crc kubenswrapper[4769]: I1125 10:11:12.259201 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-bb85b8995-p89c2" podStartSLOduration=4.259180963 podStartE2EDuration="4.259180963s" podCreationTimestamp="2025-11-25 10:11:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:11:12.25009739 +0000 UTC m=+1620.835069713" watchObservedRunningTime="2025-11-25 10:11:12.259180963 +0000 UTC m=+1620.844153286" Nov 25 10:11:12 crc kubenswrapper[4769]: I1125 10:11:12.272635 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d52e129a-837d-4574-aa08-69857eb9109f" path="/var/lib/kubelet/pods/d52e129a-837d-4574-aa08-69857eb9109f/volumes" Nov 25 10:11:13 crc kubenswrapper[4769]: I1125 10:11:13.242874 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-gvtk6" event={"ID":"495df49b-2ea1-4ee3-8d6d-5607d639d308","Type":"ContainerStarted","Data":"c3b08b14e9174ed05cc087a52cc807bf556d230a368177e6296fa17122ebeca7"} Nov 25 10:11:13 crc kubenswrapper[4769]: I1125 10:11:13.272320 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-sync-gvtk6" podStartSLOduration=3.974768486 podStartE2EDuration="42.272292201s" podCreationTimestamp="2025-11-25 10:10:31 +0000 UTC" firstStartedPulling="2025-11-25 10:10:34.148288023 +0000 UTC m=+1582.733260346" lastFinishedPulling="2025-11-25 10:11:12.445811738 +0000 UTC m=+1621.030784061" observedRunningTime="2025-11-25 10:11:13.266178174 +0000 UTC m=+1621.851150527" watchObservedRunningTime="2025-11-25 10:11:13.272292201 +0000 UTC m=+1621.857264534" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.228136 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.272432 4769 generic.go:334] "Generic (PLEG): container finished" podID="16b8c066-592e-4ec8-99e9-aca97ded614a" containerID="679eb47860e175f55b98b9697c77f7e76a82d69d926e1bd0bc724390556d6d63" exitCode=137 Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.272476 4769 generic.go:334] "Generic (PLEG): container finished" podID="16b8c066-592e-4ec8-99e9-aca97ded614a" containerID="98a2022d83ec9a1208424102ad5dd54d2b33d8ff86b81fbb2572f14aa95ee530" exitCode=137 Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.272530 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"16b8c066-592e-4ec8-99e9-aca97ded614a","Type":"ContainerDied","Data":"679eb47860e175f55b98b9697c77f7e76a82d69d926e1bd0bc724390556d6d63"} Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.272573 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"16b8c066-592e-4ec8-99e9-aca97ded614a","Type":"ContainerDied","Data":"98a2022d83ec9a1208424102ad5dd54d2b33d8ff86b81fbb2572f14aa95ee530"} Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.272586 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"16b8c066-592e-4ec8-99e9-aca97ded614a","Type":"ContainerDied","Data":"2dc226269798ae68646ba71f6ba41bbfefc4f5cd9662a6f4e3b0eaa724e0c132"} Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.272561 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.272621 4769 scope.go:117] "RemoveContainer" containerID="679eb47860e175f55b98b9697c77f7e76a82d69d926e1bd0bc724390556d6d63" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.285104 4769 generic.go:334] "Generic (PLEG): container finished" podID="495df49b-2ea1-4ee3-8d6d-5607d639d308" containerID="c3b08b14e9174ed05cc087a52cc807bf556d230a368177e6296fa17122ebeca7" exitCode=0 Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.285162 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-gvtk6" event={"ID":"495df49b-2ea1-4ee3-8d6d-5607d639d308","Type":"ContainerDied","Data":"c3b08b14e9174ed05cc087a52cc807bf556d230a368177e6296fa17122ebeca7"} Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.340868 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/16b8c066-592e-4ec8-99e9-aca97ded614a-ceilometer-tls-certs\") pod \"16b8c066-592e-4ec8-99e9-aca97ded614a\" (UID: \"16b8c066-592e-4ec8-99e9-aca97ded614a\") " Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.341088 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/16b8c066-592e-4ec8-99e9-aca97ded614a-scripts\") pod \"16b8c066-592e-4ec8-99e9-aca97ded614a\" (UID: \"16b8c066-592e-4ec8-99e9-aca97ded614a\") " Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.341133 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16b8c066-592e-4ec8-99e9-aca97ded614a-config-data\") pod \"16b8c066-592e-4ec8-99e9-aca97ded614a\" (UID: \"16b8c066-592e-4ec8-99e9-aca97ded614a\") " Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.341239 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/16b8c066-592e-4ec8-99e9-aca97ded614a-sg-core-conf-yaml\") pod \"16b8c066-592e-4ec8-99e9-aca97ded614a\" (UID: \"16b8c066-592e-4ec8-99e9-aca97ded614a\") " Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.341285 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-czfg7\" (UniqueName: \"kubernetes.io/projected/16b8c066-592e-4ec8-99e9-aca97ded614a-kube-api-access-czfg7\") pod \"16b8c066-592e-4ec8-99e9-aca97ded614a\" (UID: \"16b8c066-592e-4ec8-99e9-aca97ded614a\") " Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.341407 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/16b8c066-592e-4ec8-99e9-aca97ded614a-run-httpd\") pod \"16b8c066-592e-4ec8-99e9-aca97ded614a\" (UID: \"16b8c066-592e-4ec8-99e9-aca97ded614a\") " Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.341435 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16b8c066-592e-4ec8-99e9-aca97ded614a-combined-ca-bundle\") pod \"16b8c066-592e-4ec8-99e9-aca97ded614a\" (UID: \"16b8c066-592e-4ec8-99e9-aca97ded614a\") " Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.341459 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/16b8c066-592e-4ec8-99e9-aca97ded614a-log-httpd\") pod \"16b8c066-592e-4ec8-99e9-aca97ded614a\" (UID: \"16b8c066-592e-4ec8-99e9-aca97ded614a\") " Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.348730 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/16b8c066-592e-4ec8-99e9-aca97ded614a-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "16b8c066-592e-4ec8-99e9-aca97ded614a" (UID: "16b8c066-592e-4ec8-99e9-aca97ded614a"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.351253 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/16b8c066-592e-4ec8-99e9-aca97ded614a-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "16b8c066-592e-4ec8-99e9-aca97ded614a" (UID: "16b8c066-592e-4ec8-99e9-aca97ded614a"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.351676 4769 scope.go:117] "RemoveContainer" containerID="f4b45f2d3cb3749dd7b9d56784f5b99c70c299ffcc72226dda7225dffd99cf2a" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.362596 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/16b8c066-592e-4ec8-99e9-aca97ded614a-kube-api-access-czfg7" (OuterVolumeSpecName: "kube-api-access-czfg7") pod "16b8c066-592e-4ec8-99e9-aca97ded614a" (UID: "16b8c066-592e-4ec8-99e9-aca97ded614a"). InnerVolumeSpecName "kube-api-access-czfg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.414337 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16b8c066-592e-4ec8-99e9-aca97ded614a-scripts" (OuterVolumeSpecName: "scripts") pod "16b8c066-592e-4ec8-99e9-aca97ded614a" (UID: "16b8c066-592e-4ec8-99e9-aca97ded614a"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.443667 4769 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/16b8c066-592e-4ec8-99e9-aca97ded614a-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.443689 4769 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/16b8c066-592e-4ec8-99e9-aca97ded614a-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.443698 4769 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/16b8c066-592e-4ec8-99e9-aca97ded614a-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.443711 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-czfg7\" (UniqueName: \"kubernetes.io/projected/16b8c066-592e-4ec8-99e9-aca97ded614a-kube-api-access-czfg7\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.514727 4769 scope.go:117] "RemoveContainer" containerID="a79adb94133f4d1fe092a1914b1650b3f94e6d98d4824bd6fbc5b4b6267f98f2" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.515304 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16b8c066-592e-4ec8-99e9-aca97ded614a-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "16b8c066-592e-4ec8-99e9-aca97ded614a" (UID: "16b8c066-592e-4ec8-99e9-aca97ded614a"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.528384 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16b8c066-592e-4ec8-99e9-aca97ded614a-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "16b8c066-592e-4ec8-99e9-aca97ded614a" (UID: "16b8c066-592e-4ec8-99e9-aca97ded614a"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.546254 4769 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/16b8c066-592e-4ec8-99e9-aca97ded614a-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.546285 4769 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/16b8c066-592e-4ec8-99e9-aca97ded614a-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.549207 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16b8c066-592e-4ec8-99e9-aca97ded614a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "16b8c066-592e-4ec8-99e9-aca97ded614a" (UID: "16b8c066-592e-4ec8-99e9-aca97ded614a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.552629 4769 scope.go:117] "RemoveContainer" containerID="98a2022d83ec9a1208424102ad5dd54d2b33d8ff86b81fbb2572f14aa95ee530" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.579400 4769 scope.go:117] "RemoveContainer" containerID="679eb47860e175f55b98b9697c77f7e76a82d69d926e1bd0bc724390556d6d63" Nov 25 10:11:15 crc kubenswrapper[4769]: E1125 10:11:15.580008 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"679eb47860e175f55b98b9697c77f7e76a82d69d926e1bd0bc724390556d6d63\": container with ID starting with 679eb47860e175f55b98b9697c77f7e76a82d69d926e1bd0bc724390556d6d63 not found: ID does not exist" containerID="679eb47860e175f55b98b9697c77f7e76a82d69d926e1bd0bc724390556d6d63" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.580056 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"679eb47860e175f55b98b9697c77f7e76a82d69d926e1bd0bc724390556d6d63"} err="failed to get container status \"679eb47860e175f55b98b9697c77f7e76a82d69d926e1bd0bc724390556d6d63\": rpc error: code = NotFound desc = could not find container \"679eb47860e175f55b98b9697c77f7e76a82d69d926e1bd0bc724390556d6d63\": container with ID starting with 679eb47860e175f55b98b9697c77f7e76a82d69d926e1bd0bc724390556d6d63 not found: ID does not exist" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.580090 4769 scope.go:117] "RemoveContainer" containerID="f4b45f2d3cb3749dd7b9d56784f5b99c70c299ffcc72226dda7225dffd99cf2a" Nov 25 10:11:15 crc kubenswrapper[4769]: E1125 10:11:15.580461 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f4b45f2d3cb3749dd7b9d56784f5b99c70c299ffcc72226dda7225dffd99cf2a\": container with ID starting with f4b45f2d3cb3749dd7b9d56784f5b99c70c299ffcc72226dda7225dffd99cf2a not found: ID does not exist" containerID="f4b45f2d3cb3749dd7b9d56784f5b99c70c299ffcc72226dda7225dffd99cf2a" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.580485 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f4b45f2d3cb3749dd7b9d56784f5b99c70c299ffcc72226dda7225dffd99cf2a"} err="failed to get container status \"f4b45f2d3cb3749dd7b9d56784f5b99c70c299ffcc72226dda7225dffd99cf2a\": rpc error: code = NotFound desc = could not find container \"f4b45f2d3cb3749dd7b9d56784f5b99c70c299ffcc72226dda7225dffd99cf2a\": container with ID starting with f4b45f2d3cb3749dd7b9d56784f5b99c70c299ffcc72226dda7225dffd99cf2a not found: ID does not exist" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.580500 4769 scope.go:117] "RemoveContainer" containerID="a79adb94133f4d1fe092a1914b1650b3f94e6d98d4824bd6fbc5b4b6267f98f2" Nov 25 10:11:15 crc kubenswrapper[4769]: E1125 10:11:15.580893 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a79adb94133f4d1fe092a1914b1650b3f94e6d98d4824bd6fbc5b4b6267f98f2\": container with ID starting with a79adb94133f4d1fe092a1914b1650b3f94e6d98d4824bd6fbc5b4b6267f98f2 not found: ID does not exist" containerID="a79adb94133f4d1fe092a1914b1650b3f94e6d98d4824bd6fbc5b4b6267f98f2" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.580916 4769 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"a79adb94133f4d1fe092a1914b1650b3f94e6d98d4824bd6fbc5b4b6267f98f2"} err="failed to get container status \"a79adb94133f4d1fe092a1914b1650b3f94e6d98d4824bd6fbc5b4b6267f98f2\": rpc error: code = NotFound desc = could not find container \"a79adb94133f4d1fe092a1914b1650b3f94e6d98d4824bd6fbc5b4b6267f98f2\": container with ID starting with a79adb94133f4d1fe092a1914b1650b3f94e6d98d4824bd6fbc5b4b6267f98f2 not found: ID does not exist" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.580931 4769 scope.go:117] "RemoveContainer" containerID="98a2022d83ec9a1208424102ad5dd54d2b33d8ff86b81fbb2572f14aa95ee530" Nov 25 10:11:15 crc kubenswrapper[4769]: E1125 10:11:15.581179 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"98a2022d83ec9a1208424102ad5dd54d2b33d8ff86b81fbb2572f14aa95ee530\": container with ID starting with 98a2022d83ec9a1208424102ad5dd54d2b33d8ff86b81fbb2572f14aa95ee530 not found: ID does not exist" containerID="98a2022d83ec9a1208424102ad5dd54d2b33d8ff86b81fbb2572f14aa95ee530" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.581200 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98a2022d83ec9a1208424102ad5dd54d2b33d8ff86b81fbb2572f14aa95ee530"} err="failed to get container status \"98a2022d83ec9a1208424102ad5dd54d2b33d8ff86b81fbb2572f14aa95ee530\": rpc error: code = NotFound desc = could not find container \"98a2022d83ec9a1208424102ad5dd54d2b33d8ff86b81fbb2572f14aa95ee530\": container with ID starting with 98a2022d83ec9a1208424102ad5dd54d2b33d8ff86b81fbb2572f14aa95ee530 not found: ID does not exist" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.581213 4769 scope.go:117] "RemoveContainer" containerID="679eb47860e175f55b98b9697c77f7e76a82d69d926e1bd0bc724390556d6d63" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.581589 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"679eb47860e175f55b98b9697c77f7e76a82d69d926e1bd0bc724390556d6d63"} err="failed to get container status \"679eb47860e175f55b98b9697c77f7e76a82d69d926e1bd0bc724390556d6d63\": rpc error: code = NotFound desc = could not find container \"679eb47860e175f55b98b9697c77f7e76a82d69d926e1bd0bc724390556d6d63\": container with ID starting with 679eb47860e175f55b98b9697c77f7e76a82d69d926e1bd0bc724390556d6d63 not found: ID does not exist" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.581713 4769 scope.go:117] "RemoveContainer" containerID="f4b45f2d3cb3749dd7b9d56784f5b99c70c299ffcc72226dda7225dffd99cf2a" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.582117 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f4b45f2d3cb3749dd7b9d56784f5b99c70c299ffcc72226dda7225dffd99cf2a"} err="failed to get container status \"f4b45f2d3cb3749dd7b9d56784f5b99c70c299ffcc72226dda7225dffd99cf2a\": rpc error: code = NotFound desc = could not find container \"f4b45f2d3cb3749dd7b9d56784f5b99c70c299ffcc72226dda7225dffd99cf2a\": container with ID starting with f4b45f2d3cb3749dd7b9d56784f5b99c70c299ffcc72226dda7225dffd99cf2a not found: ID does not exist" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.582143 4769 scope.go:117] "RemoveContainer" containerID="a79adb94133f4d1fe092a1914b1650b3f94e6d98d4824bd6fbc5b4b6267f98f2" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.582451 4769 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"a79adb94133f4d1fe092a1914b1650b3f94e6d98d4824bd6fbc5b4b6267f98f2"} err="failed to get container status \"a79adb94133f4d1fe092a1914b1650b3f94e6d98d4824bd6fbc5b4b6267f98f2\": rpc error: code = NotFound desc = could not find container \"a79adb94133f4d1fe092a1914b1650b3f94e6d98d4824bd6fbc5b4b6267f98f2\": container with ID starting with a79adb94133f4d1fe092a1914b1650b3f94e6d98d4824bd6fbc5b4b6267f98f2 not found: ID does not exist" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.582472 4769 scope.go:117] "RemoveContainer" containerID="98a2022d83ec9a1208424102ad5dd54d2b33d8ff86b81fbb2572f14aa95ee530" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.582748 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98a2022d83ec9a1208424102ad5dd54d2b33d8ff86b81fbb2572f14aa95ee530"} err="failed to get container status \"98a2022d83ec9a1208424102ad5dd54d2b33d8ff86b81fbb2572f14aa95ee530\": rpc error: code = NotFound desc = could not find container \"98a2022d83ec9a1208424102ad5dd54d2b33d8ff86b81fbb2572f14aa95ee530\": container with ID starting with 98a2022d83ec9a1208424102ad5dd54d2b33d8ff86b81fbb2572f14aa95ee530 not found: ID does not exist" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.609050 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16b8c066-592e-4ec8-99e9-aca97ded614a-config-data" (OuterVolumeSpecName: "config-data") pod "16b8c066-592e-4ec8-99e9-aca97ded614a" (UID: "16b8c066-592e-4ec8-99e9-aca97ded614a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.648940 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16b8c066-592e-4ec8-99e9-aca97ded614a-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.648997 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16b8c066-592e-4ec8-99e9-aca97ded614a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.929457 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.945581 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.956816 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:11:15 crc kubenswrapper[4769]: E1125 10:11:15.957775 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16b8c066-592e-4ec8-99e9-aca97ded614a" containerName="ceilometer-central-agent" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.957800 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="16b8c066-592e-4ec8-99e9-aca97ded614a" containerName="ceilometer-central-agent" Nov 25 10:11:15 crc kubenswrapper[4769]: E1125 10:11:15.957833 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16b8c066-592e-4ec8-99e9-aca97ded614a" containerName="ceilometer-notification-agent" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.957842 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="16b8c066-592e-4ec8-99e9-aca97ded614a" containerName="ceilometer-notification-agent" Nov 25 10:11:15 crc kubenswrapper[4769]: E1125 10:11:15.957873 4769 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16b8c066-592e-4ec8-99e9-aca97ded614a" containerName="proxy-httpd" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.957881 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="16b8c066-592e-4ec8-99e9-aca97ded614a" containerName="proxy-httpd" Nov 25 10:11:15 crc kubenswrapper[4769]: E1125 10:11:15.957896 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d52e129a-837d-4574-aa08-69857eb9109f" containerName="dnsmasq-dns" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.957902 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="d52e129a-837d-4574-aa08-69857eb9109f" containerName="dnsmasq-dns" Nov 25 10:11:15 crc kubenswrapper[4769]: E1125 10:11:15.957916 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16b8c066-592e-4ec8-99e9-aca97ded614a" containerName="sg-core" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.957925 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="16b8c066-592e-4ec8-99e9-aca97ded614a" containerName="sg-core" Nov 25 10:11:15 crc kubenswrapper[4769]: E1125 10:11:15.957939 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d52e129a-837d-4574-aa08-69857eb9109f" containerName="init" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.957947 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="d52e129a-837d-4574-aa08-69857eb9109f" containerName="init" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.958229 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="16b8c066-592e-4ec8-99e9-aca97ded614a" containerName="ceilometer-central-agent" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.958258 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="d52e129a-837d-4574-aa08-69857eb9109f" containerName="dnsmasq-dns" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.958273 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="16b8c066-592e-4ec8-99e9-aca97ded614a" containerName="sg-core" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.958292 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="16b8c066-592e-4ec8-99e9-aca97ded614a" containerName="proxy-httpd" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.958300 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="16b8c066-592e-4ec8-99e9-aca97ded614a" containerName="ceilometer-notification-agent" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.961207 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.967261 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.967547 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 25 10:11:15 crc kubenswrapper[4769]: I1125 10:11:15.967672 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 10:11:16 crc kubenswrapper[4769]: I1125 10:11:16.000027 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:11:16 crc kubenswrapper[4769]: I1125 10:11:16.060341 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/eb894707-cfa1-4716-a991-31992d8cff88-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"eb894707-cfa1-4716-a991-31992d8cff88\") " pod="openstack/ceilometer-0" Nov 25 10:11:16 crc kubenswrapper[4769]: I1125 10:11:16.060392 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eb894707-cfa1-4716-a991-31992d8cff88-run-httpd\") pod \"ceilometer-0\" (UID: \"eb894707-cfa1-4716-a991-31992d8cff88\") " pod="openstack/ceilometer-0" Nov 25 10:11:16 crc kubenswrapper[4769]: I1125 10:11:16.060580 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eb894707-cfa1-4716-a991-31992d8cff88-log-httpd\") pod \"ceilometer-0\" (UID: \"eb894707-cfa1-4716-a991-31992d8cff88\") " pod="openstack/ceilometer-0" Nov 25 10:11:16 crc kubenswrapper[4769]: I1125 10:11:16.060626 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb894707-cfa1-4716-a991-31992d8cff88-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"eb894707-cfa1-4716-a991-31992d8cff88\") " pod="openstack/ceilometer-0" Nov 25 10:11:16 crc kubenswrapper[4769]: I1125 10:11:16.060864 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l7lkj\" (UniqueName: \"kubernetes.io/projected/eb894707-cfa1-4716-a991-31992d8cff88-kube-api-access-l7lkj\") pod \"ceilometer-0\" (UID: \"eb894707-cfa1-4716-a991-31992d8cff88\") " pod="openstack/ceilometer-0" Nov 25 10:11:16 crc kubenswrapper[4769]: I1125 10:11:16.061051 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eb894707-cfa1-4716-a991-31992d8cff88-scripts\") pod \"ceilometer-0\" (UID: \"eb894707-cfa1-4716-a991-31992d8cff88\") " pod="openstack/ceilometer-0" Nov 25 10:11:16 crc kubenswrapper[4769]: I1125 10:11:16.061136 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/eb894707-cfa1-4716-a991-31992d8cff88-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"eb894707-cfa1-4716-a991-31992d8cff88\") " pod="openstack/ceilometer-0" Nov 25 10:11:16 crc kubenswrapper[4769]: I1125 10:11:16.061195 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/eb894707-cfa1-4716-a991-31992d8cff88-config-data\") pod \"ceilometer-0\" (UID: \"eb894707-cfa1-4716-a991-31992d8cff88\") " pod="openstack/ceilometer-0" Nov 25 10:11:16 crc kubenswrapper[4769]: I1125 10:11:16.164078 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb894707-cfa1-4716-a991-31992d8cff88-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"eb894707-cfa1-4716-a991-31992d8cff88\") " pod="openstack/ceilometer-0" Nov 25 10:11:16 crc kubenswrapper[4769]: I1125 10:11:16.164159 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l7lkj\" (UniqueName: \"kubernetes.io/projected/eb894707-cfa1-4716-a991-31992d8cff88-kube-api-access-l7lkj\") pod \"ceilometer-0\" (UID: \"eb894707-cfa1-4716-a991-31992d8cff88\") " pod="openstack/ceilometer-0" Nov 25 10:11:16 crc kubenswrapper[4769]: I1125 10:11:16.164216 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eb894707-cfa1-4716-a991-31992d8cff88-scripts\") pod \"ceilometer-0\" (UID: \"eb894707-cfa1-4716-a991-31992d8cff88\") " pod="openstack/ceilometer-0" Nov 25 10:11:16 crc kubenswrapper[4769]: I1125 10:11:16.164249 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/eb894707-cfa1-4716-a991-31992d8cff88-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"eb894707-cfa1-4716-a991-31992d8cff88\") " pod="openstack/ceilometer-0" Nov 25 10:11:16 crc kubenswrapper[4769]: I1125 10:11:16.164273 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb894707-cfa1-4716-a991-31992d8cff88-config-data\") pod \"ceilometer-0\" (UID: \"eb894707-cfa1-4716-a991-31992d8cff88\") " pod="openstack/ceilometer-0" Nov 25 10:11:16 crc kubenswrapper[4769]: I1125 10:11:16.164437 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/eb894707-cfa1-4716-a991-31992d8cff88-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"eb894707-cfa1-4716-a991-31992d8cff88\") " pod="openstack/ceilometer-0" Nov 25 10:11:16 crc kubenswrapper[4769]: I1125 10:11:16.164460 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eb894707-cfa1-4716-a991-31992d8cff88-run-httpd\") pod \"ceilometer-0\" (UID: \"eb894707-cfa1-4716-a991-31992d8cff88\") " pod="openstack/ceilometer-0" Nov 25 10:11:16 crc kubenswrapper[4769]: I1125 10:11:16.164519 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eb894707-cfa1-4716-a991-31992d8cff88-log-httpd\") pod \"ceilometer-0\" (UID: \"eb894707-cfa1-4716-a991-31992d8cff88\") " pod="openstack/ceilometer-0" Nov 25 10:11:16 crc kubenswrapper[4769]: I1125 10:11:16.165308 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eb894707-cfa1-4716-a991-31992d8cff88-run-httpd\") pod \"ceilometer-0\" (UID: \"eb894707-cfa1-4716-a991-31992d8cff88\") " pod="openstack/ceilometer-0" Nov 25 10:11:16 crc kubenswrapper[4769]: I1125 10:11:16.165325 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/eb894707-cfa1-4716-a991-31992d8cff88-log-httpd\") pod \"ceilometer-0\" (UID: \"eb894707-cfa1-4716-a991-31992d8cff88\") " pod="openstack/ceilometer-0" Nov 25 10:11:16 crc kubenswrapper[4769]: E1125 10:11:16.168975 4769 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod16b8c066_592e_4ec8_99e9_aca97ded614a.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod16b8c066_592e_4ec8_99e9_aca97ded614a.slice/crio-2dc226269798ae68646ba71f6ba41bbfefc4f5cd9662a6f4e3b0eaa724e0c132\": RecentStats: unable to find data in memory cache]" Nov 25 10:11:16 crc kubenswrapper[4769]: I1125 10:11:16.170445 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb894707-cfa1-4716-a991-31992d8cff88-config-data\") pod \"ceilometer-0\" (UID: \"eb894707-cfa1-4716-a991-31992d8cff88\") " pod="openstack/ceilometer-0" Nov 25 10:11:16 crc kubenswrapper[4769]: I1125 10:11:16.171167 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/eb894707-cfa1-4716-a991-31992d8cff88-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"eb894707-cfa1-4716-a991-31992d8cff88\") " pod="openstack/ceilometer-0" Nov 25 10:11:16 crc kubenswrapper[4769]: I1125 10:11:16.171581 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb894707-cfa1-4716-a991-31992d8cff88-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"eb894707-cfa1-4716-a991-31992d8cff88\") " pod="openstack/ceilometer-0" Nov 25 10:11:16 crc kubenswrapper[4769]: I1125 10:11:16.180464 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/eb894707-cfa1-4716-a991-31992d8cff88-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"eb894707-cfa1-4716-a991-31992d8cff88\") " pod="openstack/ceilometer-0" Nov 25 10:11:16 crc kubenswrapper[4769]: I1125 10:11:16.180588 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eb894707-cfa1-4716-a991-31992d8cff88-scripts\") pod \"ceilometer-0\" (UID: \"eb894707-cfa1-4716-a991-31992d8cff88\") " pod="openstack/ceilometer-0" Nov 25 10:11:16 crc kubenswrapper[4769]: I1125 10:11:16.194271 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l7lkj\" (UniqueName: \"kubernetes.io/projected/eb894707-cfa1-4716-a991-31992d8cff88-kube-api-access-l7lkj\") pod \"ceilometer-0\" (UID: \"eb894707-cfa1-4716-a991-31992d8cff88\") " pod="openstack/ceilometer-0" Nov 25 10:11:16 crc kubenswrapper[4769]: I1125 10:11:16.262180 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="16b8c066-592e-4ec8-99e9-aca97ded614a" path="/var/lib/kubelet/pods/16b8c066-592e-4ec8-99e9-aca97ded614a/volumes" Nov 25 10:11:16 crc kubenswrapper[4769]: I1125 10:11:16.287073 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:11:16 crc kubenswrapper[4769]: I1125 10:11:16.651559 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-sync-gvtk6" Nov 25 10:11:16 crc kubenswrapper[4769]: I1125 10:11:16.789690 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v8zpk\" (UniqueName: \"kubernetes.io/projected/495df49b-2ea1-4ee3-8d6d-5607d639d308-kube-api-access-v8zpk\") pod \"495df49b-2ea1-4ee3-8d6d-5607d639d308\" (UID: \"495df49b-2ea1-4ee3-8d6d-5607d639d308\") " Nov 25 10:11:16 crc kubenswrapper[4769]: I1125 10:11:16.790618 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/495df49b-2ea1-4ee3-8d6d-5607d639d308-combined-ca-bundle\") pod \"495df49b-2ea1-4ee3-8d6d-5607d639d308\" (UID: \"495df49b-2ea1-4ee3-8d6d-5607d639d308\") " Nov 25 10:11:16 crc kubenswrapper[4769]: I1125 10:11:16.790799 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/495df49b-2ea1-4ee3-8d6d-5607d639d308-config-data\") pod \"495df49b-2ea1-4ee3-8d6d-5607d639d308\" (UID: \"495df49b-2ea1-4ee3-8d6d-5607d639d308\") " Nov 25 10:11:16 crc kubenswrapper[4769]: I1125 10:11:16.801408 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/495df49b-2ea1-4ee3-8d6d-5607d639d308-kube-api-access-v8zpk" (OuterVolumeSpecName: "kube-api-access-v8zpk") pod "495df49b-2ea1-4ee3-8d6d-5607d639d308" (UID: "495df49b-2ea1-4ee3-8d6d-5607d639d308"). InnerVolumeSpecName "kube-api-access-v8zpk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:11:16 crc kubenswrapper[4769]: I1125 10:11:16.880266 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/495df49b-2ea1-4ee3-8d6d-5607d639d308-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "495df49b-2ea1-4ee3-8d6d-5607d639d308" (UID: "495df49b-2ea1-4ee3-8d6d-5607d639d308"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:11:16 crc kubenswrapper[4769]: I1125 10:11:16.894734 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/495df49b-2ea1-4ee3-8d6d-5607d639d308-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:16 crc kubenswrapper[4769]: I1125 10:11:16.894779 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v8zpk\" (UniqueName: \"kubernetes.io/projected/495df49b-2ea1-4ee3-8d6d-5607d639d308-kube-api-access-v8zpk\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:16 crc kubenswrapper[4769]: I1125 10:11:16.934945 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:11:16 crc kubenswrapper[4769]: I1125 10:11:16.941536 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/495df49b-2ea1-4ee3-8d6d-5607d639d308-config-data" (OuterVolumeSpecName: "config-data") pod "495df49b-2ea1-4ee3-8d6d-5607d639d308" (UID: "495df49b-2ea1-4ee3-8d6d-5607d639d308"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:11:16 crc kubenswrapper[4769]: I1125 10:11:16.997151 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/495df49b-2ea1-4ee3-8d6d-5607d639d308-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:17 crc kubenswrapper[4769]: I1125 10:11:17.318103 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-gvtk6" event={"ID":"495df49b-2ea1-4ee3-8d6d-5607d639d308","Type":"ContainerDied","Data":"669ef02bbe789a8d79e3dfc6fb816b20e269649834ab58eed3e4abc19d2be6c0"} Nov 25 10:11:17 crc kubenswrapper[4769]: I1125 10:11:17.318163 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="669ef02bbe789a8d79e3dfc6fb816b20e269649834ab58eed3e4abc19d2be6c0" Nov 25 10:11:17 crc kubenswrapper[4769]: I1125 10:11:17.318238 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-gvtk6" Nov 25 10:11:17 crc kubenswrapper[4769]: I1125 10:11:17.340103 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eb894707-cfa1-4716-a991-31992d8cff88","Type":"ContainerStarted","Data":"7e577d84d1b8dfef03e9edf8484a66771dda3e3af25bad9ed94edeba242de523"} Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.400055 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-5479947c5b-lpmd8"] Nov 25 10:11:18 crc kubenswrapper[4769]: E1125 10:11:18.402036 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="495df49b-2ea1-4ee3-8d6d-5607d639d308" containerName="heat-db-sync" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.402057 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="495df49b-2ea1-4ee3-8d6d-5607d639d308" containerName="heat-db-sync" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.402386 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="495df49b-2ea1-4ee3-8d6d-5607d639d308" containerName="heat-db-sync" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.403691 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-5479947c5b-lpmd8" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.419936 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-5479947c5b-lpmd8"] Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.486390 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-54cdcc7844-z2pdq"] Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.488910 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-54cdcc7844-z2pdq" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.505094 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-54cdcc7844-z2pdq"] Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.534741 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-55bb67dcdf-bnzxj"] Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.536437 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-55bb67dcdf-bnzxj" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.553714 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cea603a-b499-42a3-a881-c783ecb978a5-combined-ca-bundle\") pod \"heat-engine-5479947c5b-lpmd8\" (UID: \"6cea603a-b499-42a3-a881-c783ecb978a5\") " pod="openstack/heat-engine-5479947c5b-lpmd8" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.553802 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mqpqj\" (UniqueName: \"kubernetes.io/projected/6cea603a-b499-42a3-a881-c783ecb978a5-kube-api-access-mqpqj\") pod \"heat-engine-5479947c5b-lpmd8\" (UID: \"6cea603a-b499-42a3-a881-c783ecb978a5\") " pod="openstack/heat-engine-5479947c5b-lpmd8" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.553882 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6cea603a-b499-42a3-a881-c783ecb978a5-config-data-custom\") pod \"heat-engine-5479947c5b-lpmd8\" (UID: \"6cea603a-b499-42a3-a881-c783ecb978a5\") " pod="openstack/heat-engine-5479947c5b-lpmd8" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.553921 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6cea603a-b499-42a3-a881-c783ecb978a5-config-data\") pod \"heat-engine-5479947c5b-lpmd8\" (UID: \"6cea603a-b499-42a3-a881-c783ecb978a5\") " pod="openstack/heat-engine-5479947c5b-lpmd8" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.556646 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-55bb67dcdf-bnzxj"] Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.656348 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qv26c\" (UniqueName: \"kubernetes.io/projected/40323897-a2ff-4536-af1c-e0777ba05b89-kube-api-access-qv26c\") pod \"heat-cfnapi-55bb67dcdf-bnzxj\" (UID: \"40323897-a2ff-4536-af1c-e0777ba05b89\") " pod="openstack/heat-cfnapi-55bb67dcdf-bnzxj" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.656429 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/40323897-a2ff-4536-af1c-e0777ba05b89-config-data-custom\") pod \"heat-cfnapi-55bb67dcdf-bnzxj\" (UID: \"40323897-a2ff-4536-af1c-e0777ba05b89\") " pod="openstack/heat-cfnapi-55bb67dcdf-bnzxj" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.656504 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c54a9d3c-ee22-4f2a-9988-14600cbaf86d-public-tls-certs\") pod \"heat-api-54cdcc7844-z2pdq\" (UID: \"c54a9d3c-ee22-4f2a-9988-14600cbaf86d\") " pod="openstack/heat-api-54cdcc7844-z2pdq" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.656580 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/40323897-a2ff-4536-af1c-e0777ba05b89-public-tls-certs\") pod \"heat-cfnapi-55bb67dcdf-bnzxj\" (UID: \"40323897-a2ff-4536-af1c-e0777ba05b89\") " pod="openstack/heat-cfnapi-55bb67dcdf-bnzxj" Nov 25 10:11:18 crc 
kubenswrapper[4769]: I1125 10:11:18.656649 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cea603a-b499-42a3-a881-c783ecb978a5-combined-ca-bundle\") pod \"heat-engine-5479947c5b-lpmd8\" (UID: \"6cea603a-b499-42a3-a881-c783ecb978a5\") " pod="openstack/heat-engine-5479947c5b-lpmd8" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.656673 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40323897-a2ff-4536-af1c-e0777ba05b89-combined-ca-bundle\") pod \"heat-cfnapi-55bb67dcdf-bnzxj\" (UID: \"40323897-a2ff-4536-af1c-e0777ba05b89\") " pod="openstack/heat-cfnapi-55bb67dcdf-bnzxj" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.656753 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c54a9d3c-ee22-4f2a-9988-14600cbaf86d-internal-tls-certs\") pod \"heat-api-54cdcc7844-z2pdq\" (UID: \"c54a9d3c-ee22-4f2a-9988-14600cbaf86d\") " pod="openstack/heat-api-54cdcc7844-z2pdq" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.656792 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c54a9d3c-ee22-4f2a-9988-14600cbaf86d-config-data\") pod \"heat-api-54cdcc7844-z2pdq\" (UID: \"c54a9d3c-ee22-4f2a-9988-14600cbaf86d\") " pod="openstack/heat-api-54cdcc7844-z2pdq" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.656810 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mqpqj\" (UniqueName: \"kubernetes.io/projected/6cea603a-b499-42a3-a881-c783ecb978a5-kube-api-access-mqpqj\") pod \"heat-engine-5479947c5b-lpmd8\" (UID: \"6cea603a-b499-42a3-a881-c783ecb978a5\") " pod="openstack/heat-engine-5479947c5b-lpmd8" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.656827 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40323897-a2ff-4536-af1c-e0777ba05b89-config-data\") pod \"heat-cfnapi-55bb67dcdf-bnzxj\" (UID: \"40323897-a2ff-4536-af1c-e0777ba05b89\") " pod="openstack/heat-cfnapi-55bb67dcdf-bnzxj" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.657136 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/40323897-a2ff-4536-af1c-e0777ba05b89-internal-tls-certs\") pod \"heat-cfnapi-55bb67dcdf-bnzxj\" (UID: \"40323897-a2ff-4536-af1c-e0777ba05b89\") " pod="openstack/heat-cfnapi-55bb67dcdf-bnzxj" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.657300 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c54a9d3c-ee22-4f2a-9988-14600cbaf86d-combined-ca-bundle\") pod \"heat-api-54cdcc7844-z2pdq\" (UID: \"c54a9d3c-ee22-4f2a-9988-14600cbaf86d\") " pod="openstack/heat-api-54cdcc7844-z2pdq" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.657428 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6cea603a-b499-42a3-a881-c783ecb978a5-config-data-custom\") pod \"heat-engine-5479947c5b-lpmd8\" (UID: 
\"6cea603a-b499-42a3-a881-c783ecb978a5\") " pod="openstack/heat-engine-5479947c5b-lpmd8" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.657501 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cgj9q\" (UniqueName: \"kubernetes.io/projected/c54a9d3c-ee22-4f2a-9988-14600cbaf86d-kube-api-access-cgj9q\") pod \"heat-api-54cdcc7844-z2pdq\" (UID: \"c54a9d3c-ee22-4f2a-9988-14600cbaf86d\") " pod="openstack/heat-api-54cdcc7844-z2pdq" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.657874 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c54a9d3c-ee22-4f2a-9988-14600cbaf86d-config-data-custom\") pod \"heat-api-54cdcc7844-z2pdq\" (UID: \"c54a9d3c-ee22-4f2a-9988-14600cbaf86d\") " pod="openstack/heat-api-54cdcc7844-z2pdq" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.657905 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6cea603a-b499-42a3-a881-c783ecb978a5-config-data\") pod \"heat-engine-5479947c5b-lpmd8\" (UID: \"6cea603a-b499-42a3-a881-c783ecb978a5\") " pod="openstack/heat-engine-5479947c5b-lpmd8" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.675516 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6cea603a-b499-42a3-a881-c783ecb978a5-config-data\") pod \"heat-engine-5479947c5b-lpmd8\" (UID: \"6cea603a-b499-42a3-a881-c783ecb978a5\") " pod="openstack/heat-engine-5479947c5b-lpmd8" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.676466 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cea603a-b499-42a3-a881-c783ecb978a5-combined-ca-bundle\") pod \"heat-engine-5479947c5b-lpmd8\" (UID: \"6cea603a-b499-42a3-a881-c783ecb978a5\") " pod="openstack/heat-engine-5479947c5b-lpmd8" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.676774 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6cea603a-b499-42a3-a881-c783ecb978a5-config-data-custom\") pod \"heat-engine-5479947c5b-lpmd8\" (UID: \"6cea603a-b499-42a3-a881-c783ecb978a5\") " pod="openstack/heat-engine-5479947c5b-lpmd8" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.680541 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mqpqj\" (UniqueName: \"kubernetes.io/projected/6cea603a-b499-42a3-a881-c783ecb978a5-kube-api-access-mqpqj\") pod \"heat-engine-5479947c5b-lpmd8\" (UID: \"6cea603a-b499-42a3-a881-c783ecb978a5\") " pod="openstack/heat-engine-5479947c5b-lpmd8" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.750914 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-5479947c5b-lpmd8" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.761239 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qv26c\" (UniqueName: \"kubernetes.io/projected/40323897-a2ff-4536-af1c-e0777ba05b89-kube-api-access-qv26c\") pod \"heat-cfnapi-55bb67dcdf-bnzxj\" (UID: \"40323897-a2ff-4536-af1c-e0777ba05b89\") " pod="openstack/heat-cfnapi-55bb67dcdf-bnzxj" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.761302 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/40323897-a2ff-4536-af1c-e0777ba05b89-config-data-custom\") pod \"heat-cfnapi-55bb67dcdf-bnzxj\" (UID: \"40323897-a2ff-4536-af1c-e0777ba05b89\") " pod="openstack/heat-cfnapi-55bb67dcdf-bnzxj" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.761349 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c54a9d3c-ee22-4f2a-9988-14600cbaf86d-public-tls-certs\") pod \"heat-api-54cdcc7844-z2pdq\" (UID: \"c54a9d3c-ee22-4f2a-9988-14600cbaf86d\") " pod="openstack/heat-api-54cdcc7844-z2pdq" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.761403 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/40323897-a2ff-4536-af1c-e0777ba05b89-public-tls-certs\") pod \"heat-cfnapi-55bb67dcdf-bnzxj\" (UID: \"40323897-a2ff-4536-af1c-e0777ba05b89\") " pod="openstack/heat-cfnapi-55bb67dcdf-bnzxj" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.761427 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40323897-a2ff-4536-af1c-e0777ba05b89-combined-ca-bundle\") pod \"heat-cfnapi-55bb67dcdf-bnzxj\" (UID: \"40323897-a2ff-4536-af1c-e0777ba05b89\") " pod="openstack/heat-cfnapi-55bb67dcdf-bnzxj" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.761485 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c54a9d3c-ee22-4f2a-9988-14600cbaf86d-internal-tls-certs\") pod \"heat-api-54cdcc7844-z2pdq\" (UID: \"c54a9d3c-ee22-4f2a-9988-14600cbaf86d\") " pod="openstack/heat-api-54cdcc7844-z2pdq" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.761501 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c54a9d3c-ee22-4f2a-9988-14600cbaf86d-config-data\") pod \"heat-api-54cdcc7844-z2pdq\" (UID: \"c54a9d3c-ee22-4f2a-9988-14600cbaf86d\") " pod="openstack/heat-api-54cdcc7844-z2pdq" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.761520 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40323897-a2ff-4536-af1c-e0777ba05b89-config-data\") pod \"heat-cfnapi-55bb67dcdf-bnzxj\" (UID: \"40323897-a2ff-4536-af1c-e0777ba05b89\") " pod="openstack/heat-cfnapi-55bb67dcdf-bnzxj" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.761586 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/40323897-a2ff-4536-af1c-e0777ba05b89-internal-tls-certs\") pod \"heat-cfnapi-55bb67dcdf-bnzxj\" (UID: \"40323897-a2ff-4536-af1c-e0777ba05b89\") " 
pod="openstack/heat-cfnapi-55bb67dcdf-bnzxj" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.761605 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c54a9d3c-ee22-4f2a-9988-14600cbaf86d-combined-ca-bundle\") pod \"heat-api-54cdcc7844-z2pdq\" (UID: \"c54a9d3c-ee22-4f2a-9988-14600cbaf86d\") " pod="openstack/heat-api-54cdcc7844-z2pdq" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.761640 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cgj9q\" (UniqueName: \"kubernetes.io/projected/c54a9d3c-ee22-4f2a-9988-14600cbaf86d-kube-api-access-cgj9q\") pod \"heat-api-54cdcc7844-z2pdq\" (UID: \"c54a9d3c-ee22-4f2a-9988-14600cbaf86d\") " pod="openstack/heat-api-54cdcc7844-z2pdq" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.761659 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c54a9d3c-ee22-4f2a-9988-14600cbaf86d-config-data-custom\") pod \"heat-api-54cdcc7844-z2pdq\" (UID: \"c54a9d3c-ee22-4f2a-9988-14600cbaf86d\") " pod="openstack/heat-api-54cdcc7844-z2pdq" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.766208 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c54a9d3c-ee22-4f2a-9988-14600cbaf86d-config-data-custom\") pod \"heat-api-54cdcc7844-z2pdq\" (UID: \"c54a9d3c-ee22-4f2a-9988-14600cbaf86d\") " pod="openstack/heat-api-54cdcc7844-z2pdq" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.766784 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c54a9d3c-ee22-4f2a-9988-14600cbaf86d-internal-tls-certs\") pod \"heat-api-54cdcc7844-z2pdq\" (UID: \"c54a9d3c-ee22-4f2a-9988-14600cbaf86d\") " pod="openstack/heat-api-54cdcc7844-z2pdq" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.767288 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c54a9d3c-ee22-4f2a-9988-14600cbaf86d-combined-ca-bundle\") pod \"heat-api-54cdcc7844-z2pdq\" (UID: \"c54a9d3c-ee22-4f2a-9988-14600cbaf86d\") " pod="openstack/heat-api-54cdcc7844-z2pdq" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.767521 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/40323897-a2ff-4536-af1c-e0777ba05b89-internal-tls-certs\") pod \"heat-cfnapi-55bb67dcdf-bnzxj\" (UID: \"40323897-a2ff-4536-af1c-e0777ba05b89\") " pod="openstack/heat-cfnapi-55bb67dcdf-bnzxj" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.768626 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/40323897-a2ff-4536-af1c-e0777ba05b89-config-data-custom\") pod \"heat-cfnapi-55bb67dcdf-bnzxj\" (UID: \"40323897-a2ff-4536-af1c-e0777ba05b89\") " pod="openstack/heat-cfnapi-55bb67dcdf-bnzxj" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.771466 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/40323897-a2ff-4536-af1c-e0777ba05b89-public-tls-certs\") pod \"heat-cfnapi-55bb67dcdf-bnzxj\" (UID: \"40323897-a2ff-4536-af1c-e0777ba05b89\") " pod="openstack/heat-cfnapi-55bb67dcdf-bnzxj" Nov 25 10:11:18 crc kubenswrapper[4769]: 
I1125 10:11:18.790717 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40323897-a2ff-4536-af1c-e0777ba05b89-combined-ca-bundle\") pod \"heat-cfnapi-55bb67dcdf-bnzxj\" (UID: \"40323897-a2ff-4536-af1c-e0777ba05b89\") " pod="openstack/heat-cfnapi-55bb67dcdf-bnzxj" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.800555 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c54a9d3c-ee22-4f2a-9988-14600cbaf86d-config-data\") pod \"heat-api-54cdcc7844-z2pdq\" (UID: \"c54a9d3c-ee22-4f2a-9988-14600cbaf86d\") " pod="openstack/heat-api-54cdcc7844-z2pdq" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.808630 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40323897-a2ff-4536-af1c-e0777ba05b89-config-data\") pod \"heat-cfnapi-55bb67dcdf-bnzxj\" (UID: \"40323897-a2ff-4536-af1c-e0777ba05b89\") " pod="openstack/heat-cfnapi-55bb67dcdf-bnzxj" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.811442 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c54a9d3c-ee22-4f2a-9988-14600cbaf86d-public-tls-certs\") pod \"heat-api-54cdcc7844-z2pdq\" (UID: \"c54a9d3c-ee22-4f2a-9988-14600cbaf86d\") " pod="openstack/heat-api-54cdcc7844-z2pdq" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.818911 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cgj9q\" (UniqueName: \"kubernetes.io/projected/c54a9d3c-ee22-4f2a-9988-14600cbaf86d-kube-api-access-cgj9q\") pod \"heat-api-54cdcc7844-z2pdq\" (UID: \"c54a9d3c-ee22-4f2a-9988-14600cbaf86d\") " pod="openstack/heat-api-54cdcc7844-z2pdq" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.832622 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qv26c\" (UniqueName: \"kubernetes.io/projected/40323897-a2ff-4536-af1c-e0777ba05b89-kube-api-access-qv26c\") pod \"heat-cfnapi-55bb67dcdf-bnzxj\" (UID: \"40323897-a2ff-4536-af1c-e0777ba05b89\") " pod="openstack/heat-cfnapi-55bb67dcdf-bnzxj" Nov 25 10:11:18 crc kubenswrapper[4769]: I1125 10:11:18.858691 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-55bb67dcdf-bnzxj" Nov 25 10:11:19 crc kubenswrapper[4769]: I1125 10:11:19.107670 4769 util.go:30] "No sandbox for pod can be found. 
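The util.go messages seen throughout ("No sandbox for pod can be found" versus "No ready sandbox for pod can be found") distinguish two reasons for creating a pod sandbox: none exists yet, or one exists but is no longer ready. The decision reduces to roughly the following, with hypothetical types:

```go
package main

import "fmt"

type sandboxStatus struct{ exists, ready bool }

func needNewSandbox(s sandboxStatus) (bool, string) {
	switch {
	case !s.exists:
		return true, "No sandbox for pod can be found. Need to start a new one"
	case !s.ready:
		return true, "No ready sandbox for pod can be found. Need to start a new one"
	}
	return false, ""
}

func main() {
	fmt.Println(needNewSandbox(sandboxStatus{}))                           // brand-new pod
	fmt.Println(needNewSandbox(sandboxStatus{exists: true, ready: false})) // sandbox died
}
```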
Need to start a new one" pod="openstack/heat-api-54cdcc7844-z2pdq" Nov 25 10:11:19 crc kubenswrapper[4769]: I1125 10:11:19.134130 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-bb85b8995-p89c2" Nov 25 10:11:19 crc kubenswrapper[4769]: I1125 10:11:19.222494 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-68df85789f-xbxtj"] Nov 25 10:11:19 crc kubenswrapper[4769]: I1125 10:11:19.222732 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-68df85789f-xbxtj" podUID="891399fa-746b-40b9-b3b4-a3a7afe40aa6" containerName="dnsmasq-dns" containerID="cri-o://fffaba9f675aebcd6ff1d6558b13d6c32f9594d6224197d95fbeadf6339e19ce" gracePeriod=10 Nov 25 10:11:19 crc kubenswrapper[4769]: I1125 10:11:19.392232 4769 generic.go:334] "Generic (PLEG): container finished" podID="891399fa-746b-40b9-b3b4-a3a7afe40aa6" containerID="fffaba9f675aebcd6ff1d6558b13d6c32f9594d6224197d95fbeadf6339e19ce" exitCode=0 Nov 25 10:11:19 crc kubenswrapper[4769]: I1125 10:11:19.392504 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68df85789f-xbxtj" event={"ID":"891399fa-746b-40b9-b3b4-a3a7afe40aa6","Type":"ContainerDied","Data":"fffaba9f675aebcd6ff1d6558b13d6c32f9594d6224197d95fbeadf6339e19ce"} Nov 25 10:11:21 crc kubenswrapper[4769]: I1125 10:11:21.232365 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68df85789f-xbxtj" Nov 25 10:11:21 crc kubenswrapper[4769]: I1125 10:11:21.340567 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b8x4k\" (UniqueName: \"kubernetes.io/projected/891399fa-746b-40b9-b3b4-a3a7afe40aa6-kube-api-access-b8x4k\") pod \"891399fa-746b-40b9-b3b4-a3a7afe40aa6\" (UID: \"891399fa-746b-40b9-b3b4-a3a7afe40aa6\") " Nov 25 10:11:21 crc kubenswrapper[4769]: I1125 10:11:21.340623 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/891399fa-746b-40b9-b3b4-a3a7afe40aa6-dns-svc\") pod \"891399fa-746b-40b9-b3b4-a3a7afe40aa6\" (UID: \"891399fa-746b-40b9-b3b4-a3a7afe40aa6\") " Nov 25 10:11:21 crc kubenswrapper[4769]: I1125 10:11:21.340738 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/891399fa-746b-40b9-b3b4-a3a7afe40aa6-dns-swift-storage-0\") pod \"891399fa-746b-40b9-b3b4-a3a7afe40aa6\" (UID: \"891399fa-746b-40b9-b3b4-a3a7afe40aa6\") " Nov 25 10:11:21 crc kubenswrapper[4769]: I1125 10:11:21.340809 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/891399fa-746b-40b9-b3b4-a3a7afe40aa6-openstack-edpm-ipam\") pod \"891399fa-746b-40b9-b3b4-a3a7afe40aa6\" (UID: \"891399fa-746b-40b9-b3b4-a3a7afe40aa6\") " Nov 25 10:11:21 crc kubenswrapper[4769]: I1125 10:11:21.340841 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/891399fa-746b-40b9-b3b4-a3a7afe40aa6-ovsdbserver-sb\") pod \"891399fa-746b-40b9-b3b4-a3a7afe40aa6\" (UID: \"891399fa-746b-40b9-b3b4-a3a7afe40aa6\") " Nov 25 10:11:21 crc kubenswrapper[4769]: I1125 10:11:21.340898 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/891399fa-746b-40b9-b3b4-a3a7afe40aa6-config\") pod \"891399fa-746b-40b9-b3b4-a3a7afe40aa6\" (UID: \"891399fa-746b-40b9-b3b4-a3a7afe40aa6\") " Nov 25 10:11:21 crc kubenswrapper[4769]: I1125 10:11:21.341002 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/891399fa-746b-40b9-b3b4-a3a7afe40aa6-ovsdbserver-nb\") pod \"891399fa-746b-40b9-b3b4-a3a7afe40aa6\" (UID: \"891399fa-746b-40b9-b3b4-a3a7afe40aa6\") " Nov 25 10:11:21 crc kubenswrapper[4769]: I1125 10:11:21.349324 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/891399fa-746b-40b9-b3b4-a3a7afe40aa6-kube-api-access-b8x4k" (OuterVolumeSpecName: "kube-api-access-b8x4k") pod "891399fa-746b-40b9-b3b4-a3a7afe40aa6" (UID: "891399fa-746b-40b9-b3b4-a3a7afe40aa6"). InnerVolumeSpecName "kube-api-access-b8x4k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:11:21 crc kubenswrapper[4769]: I1125 10:11:21.428658 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68df85789f-xbxtj" event={"ID":"891399fa-746b-40b9-b3b4-a3a7afe40aa6","Type":"ContainerDied","Data":"202bbae5b53199b27a02eabcf8ad8e073aad0f32ad56a243f223dd096a49a10a"} Nov 25 10:11:21 crc kubenswrapper[4769]: I1125 10:11:21.428721 4769 scope.go:117] "RemoveContainer" containerID="fffaba9f675aebcd6ff1d6558b13d6c32f9594d6224197d95fbeadf6339e19ce" Nov 25 10:11:21 crc kubenswrapper[4769]: I1125 10:11:21.428916 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68df85789f-xbxtj" Nov 25 10:11:21 crc kubenswrapper[4769]: I1125 10:11:21.440034 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eb894707-cfa1-4716-a991-31992d8cff88","Type":"ContainerStarted","Data":"78d1379800f04f8ad4decb9661f3d1d7cd9089312221cd1ef27440e40e9bfff8"} Nov 25 10:11:21 crc kubenswrapper[4769]: I1125 10:11:21.442163 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/891399fa-746b-40b9-b3b4-a3a7afe40aa6-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "891399fa-746b-40b9-b3b4-a3a7afe40aa6" (UID: "891399fa-746b-40b9-b3b4-a3a7afe40aa6"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:11:21 crc kubenswrapper[4769]: I1125 10:11:21.444039 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b8x4k\" (UniqueName: \"kubernetes.io/projected/891399fa-746b-40b9-b3b4-a3a7afe40aa6-kube-api-access-b8x4k\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:21 crc kubenswrapper[4769]: I1125 10:11:21.444057 4769 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/891399fa-746b-40b9-b3b4-a3a7afe40aa6-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:21 crc kubenswrapper[4769]: I1125 10:11:21.449599 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/891399fa-746b-40b9-b3b4-a3a7afe40aa6-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "891399fa-746b-40b9-b3b4-a3a7afe40aa6" (UID: "891399fa-746b-40b9-b3b4-a3a7afe40aa6"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:11:21 crc kubenswrapper[4769]: I1125 10:11:21.460613 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/891399fa-746b-40b9-b3b4-a3a7afe40aa6-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "891399fa-746b-40b9-b3b4-a3a7afe40aa6" (UID: "891399fa-746b-40b9-b3b4-a3a7afe40aa6"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:11:21 crc kubenswrapper[4769]: I1125 10:11:21.470769 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/891399fa-746b-40b9-b3b4-a3a7afe40aa6-config" (OuterVolumeSpecName: "config") pod "891399fa-746b-40b9-b3b4-a3a7afe40aa6" (UID: "891399fa-746b-40b9-b3b4-a3a7afe40aa6"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:11:21 crc kubenswrapper[4769]: I1125 10:11:21.478842 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/891399fa-746b-40b9-b3b4-a3a7afe40aa6-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "891399fa-746b-40b9-b3b4-a3a7afe40aa6" (UID: "891399fa-746b-40b9-b3b4-a3a7afe40aa6"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:11:21 crc kubenswrapper[4769]: I1125 10:11:21.482899 4769 scope.go:117] "RemoveContainer" containerID="5011b0e238564e0c7ca64a5eccf1e84f075bc675b3f22da3391a62f945070c2e" Nov 25 10:11:21 crc kubenswrapper[4769]: I1125 10:11:21.483195 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/891399fa-746b-40b9-b3b4-a3a7afe40aa6-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "891399fa-746b-40b9-b3b4-a3a7afe40aa6" (UID: "891399fa-746b-40b9-b3b4-a3a7afe40aa6"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:11:21 crc kubenswrapper[4769]: I1125 10:11:21.546579 4769 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/891399fa-746b-40b9-b3b4-a3a7afe40aa6-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:21 crc kubenswrapper[4769]: I1125 10:11:21.546611 4769 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/891399fa-746b-40b9-b3b4-a3a7afe40aa6-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:21 crc kubenswrapper[4769]: I1125 10:11:21.546624 4769 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/891399fa-746b-40b9-b3b4-a3a7afe40aa6-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:21 crc kubenswrapper[4769]: I1125 10:11:21.546635 4769 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/891399fa-746b-40b9-b3b4-a3a7afe40aa6-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:21 crc kubenswrapper[4769]: I1125 10:11:21.546646 4769 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/891399fa-746b-40b9-b3b4-a3a7afe40aa6-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:21 crc kubenswrapper[4769]: I1125 10:11:21.634174 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-55bb67dcdf-bnzxj"] Nov 25 10:11:21 crc kubenswrapper[4769]: I1125 10:11:21.780370 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-68df85789f-xbxtj"] Nov 25 10:11:21 crc kubenswrapper[4769]: I1125 10:11:21.794199 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-68df85789f-xbxtj"] Nov 25 10:11:21 crc kubenswrapper[4769]: I1125 10:11:21.815847 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-5479947c5b-lpmd8"] Nov 25 10:11:21 crc kubenswrapper[4769]: I1125 10:11:21.830827 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-54cdcc7844-z2pdq"] Nov 25 10:11:22 crc kubenswrapper[4769]: I1125 10:11:22.264685 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="891399fa-746b-40b9-b3b4-a3a7afe40aa6" path="/var/lib/kubelet/pods/891399fa-746b-40b9-b3b4-a3a7afe40aa6/volumes" Nov 25 10:11:22 crc kubenswrapper[4769]: I1125 10:11:22.455927 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-54cdcc7844-z2pdq" event={"ID":"c54a9d3c-ee22-4f2a-9988-14600cbaf86d","Type":"ContainerStarted","Data":"313f8917de8ef71a0cc846168e3342e016405d162330713762c6629da293f201"} Nov 25 10:11:22 crc kubenswrapper[4769]: I1125 10:11:22.461032 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eb894707-cfa1-4716-a991-31992d8cff88","Type":"ContainerStarted","Data":"d29890ea0ad93446633d5bdcb85e29dbc4ad40743e616c99538b29869e4d4894"} Nov 25 10:11:22 crc kubenswrapper[4769]: I1125 10:11:22.464157 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-5479947c5b-lpmd8" event={"ID":"6cea603a-b499-42a3-a881-c783ecb978a5","Type":"ContainerStarted","Data":"0f29baa3399bb9073de0f2f10f7d1f1e13daff54133275e71b7369f57ea3ce3b"} Nov 25 10:11:22 crc kubenswrapper[4769]: I1125 10:11:22.464224 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-5479947c5b-lpmd8" 
event={"ID":"6cea603a-b499-42a3-a881-c783ecb978a5","Type":"ContainerStarted","Data":"afbfedfcfbbd67580c81599654019eebbe29cca7cb4af4c1e4f6fee3ff73d0e2"} Nov 25 10:11:22 crc kubenswrapper[4769]: I1125 10:11:22.464293 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-5479947c5b-lpmd8" Nov 25 10:11:22 crc kubenswrapper[4769]: I1125 10:11:22.466850 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-55bb67dcdf-bnzxj" event={"ID":"40323897-a2ff-4536-af1c-e0777ba05b89","Type":"ContainerStarted","Data":"cda01bd337ab2d424fc82a81e01be3e71862d74db8e21cc8d6aee741a66d771f"} Nov 25 10:11:22 crc kubenswrapper[4769]: I1125 10:11:22.492013 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-5479947c5b-lpmd8" podStartSLOduration=4.491986267 podStartE2EDuration="4.491986267s" podCreationTimestamp="2025-11-25 10:11:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:11:22.491066313 +0000 UTC m=+1631.076038626" watchObservedRunningTime="2025-11-25 10:11:22.491986267 +0000 UTC m=+1631.076958580" Nov 25 10:11:25 crc kubenswrapper[4769]: I1125 10:11:25.237216 4769 scope.go:117] "RemoveContainer" containerID="19d7e4719048ead4c6811e0bf09eaeb04f5d9c0b0e2c91bc113c457c1276b7a7" Nov 25 10:11:25 crc kubenswrapper[4769]: E1125 10:11:25.238316 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:11:25 crc kubenswrapper[4769]: I1125 10:11:25.521096 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-55bb67dcdf-bnzxj" event={"ID":"40323897-a2ff-4536-af1c-e0777ba05b89","Type":"ContainerStarted","Data":"8ddbb6aad810a6d8986f88dd2ba5ba46613a53efa0a229a01a191eaddcd687d3"} Nov 25 10:11:25 crc kubenswrapper[4769]: I1125 10:11:25.521238 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-55bb67dcdf-bnzxj" Nov 25 10:11:25 crc kubenswrapper[4769]: I1125 10:11:25.523365 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-54cdcc7844-z2pdq" event={"ID":"c54a9d3c-ee22-4f2a-9988-14600cbaf86d","Type":"ContainerStarted","Data":"692b723ba90cefe27fd02fc1c89fe02e694c6d0f23f4088d8d4d2d7450da2391"} Nov 25 10:11:25 crc kubenswrapper[4769]: I1125 10:11:25.525249 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-54cdcc7844-z2pdq" Nov 25 10:11:25 crc kubenswrapper[4769]: I1125 10:11:25.527927 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eb894707-cfa1-4716-a991-31992d8cff88","Type":"ContainerStarted","Data":"fd4fdd77a2b818c680100820a7222202643e930794e73408353a31ced51011d6"} Nov 25 10:11:25 crc kubenswrapper[4769]: I1125 10:11:25.564831 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-55bb67dcdf-bnzxj" podStartSLOduration=4.718140087 podStartE2EDuration="7.564802752s" podCreationTimestamp="2025-11-25 10:11:18 +0000 UTC" firstStartedPulling="2025-11-25 10:11:21.644131536 +0000 UTC m=+1630.229103849" 
lastFinishedPulling="2025-11-25 10:11:24.490794201 +0000 UTC m=+1633.075766514" observedRunningTime="2025-11-25 10:11:25.544582833 +0000 UTC m=+1634.129555146" watchObservedRunningTime="2025-11-25 10:11:25.564802752 +0000 UTC m=+1634.149775075" Nov 25 10:11:25 crc kubenswrapper[4769]: I1125 10:11:25.588283 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-54cdcc7844-z2pdq" podStartSLOduration=4.969928692 podStartE2EDuration="7.588260703s" podCreationTimestamp="2025-11-25 10:11:18 +0000 UTC" firstStartedPulling="2025-11-25 10:11:21.818083206 +0000 UTC m=+1630.403055519" lastFinishedPulling="2025-11-25 10:11:24.436415217 +0000 UTC m=+1633.021387530" observedRunningTime="2025-11-25 10:11:25.582180757 +0000 UTC m=+1634.167153070" watchObservedRunningTime="2025-11-25 10:11:25.588260703 +0000 UTC m=+1634.173233016" Nov 25 10:11:26 crc kubenswrapper[4769]: I1125 10:11:26.557778 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eb894707-cfa1-4716-a991-31992d8cff88","Type":"ContainerStarted","Data":"acb1d57c722e9f79c256074f2163e0f83b28e5f12c9177ec62c9aea25f553756"} Nov 25 10:11:26 crc kubenswrapper[4769]: I1125 10:11:26.559247 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 10:11:26 crc kubenswrapper[4769]: I1125 10:11:26.645189 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.568853276 podStartE2EDuration="11.645163655s" podCreationTimestamp="2025-11-25 10:11:15 +0000 UTC" firstStartedPulling="2025-11-25 10:11:16.927984062 +0000 UTC m=+1625.512956375" lastFinishedPulling="2025-11-25 10:11:26.004294411 +0000 UTC m=+1634.589266754" observedRunningTime="2025-11-25 10:11:26.587041145 +0000 UTC m=+1635.172013488" watchObservedRunningTime="2025-11-25 10:11:26.645163655 +0000 UTC m=+1635.230135978" Nov 25 10:11:30 crc kubenswrapper[4769]: I1125 10:11:30.677363 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-api-54cdcc7844-z2pdq" Nov 25 10:11:30 crc kubenswrapper[4769]: I1125 10:11:30.806910 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-58b7d79fd9-47jn4"] Nov 25 10:11:30 crc kubenswrapper[4769]: I1125 10:11:30.807384 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-api-58b7d79fd9-47jn4" podUID="c8bd1d11-b593-4c27-a79e-d49792d851ee" containerName="heat-api" containerID="cri-o://79c2e0d139b7a0949a3bb799a63b670a13c45910d0dad1425c8c980bf82ef178" gracePeriod=60 Nov 25 10:11:33 crc kubenswrapper[4769]: I1125 10:11:33.440812 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-n65q2"] Nov 25 10:11:33 crc kubenswrapper[4769]: E1125 10:11:33.444681 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="891399fa-746b-40b9-b3b4-a3a7afe40aa6" containerName="dnsmasq-dns" Nov 25 10:11:33 crc kubenswrapper[4769]: I1125 10:11:33.444707 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="891399fa-746b-40b9-b3b4-a3a7afe40aa6" containerName="dnsmasq-dns" Nov 25 10:11:33 crc kubenswrapper[4769]: E1125 10:11:33.444735 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="891399fa-746b-40b9-b3b4-a3a7afe40aa6" containerName="init" Nov 25 10:11:33 crc kubenswrapper[4769]: I1125 10:11:33.444869 4769 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="891399fa-746b-40b9-b3b4-a3a7afe40aa6" containerName="init" Nov 25 10:11:33 crc kubenswrapper[4769]: I1125 10:11:33.445462 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="891399fa-746b-40b9-b3b4-a3a7afe40aa6" containerName="dnsmasq-dns" Nov 25 10:11:33 crc kubenswrapper[4769]: I1125 10:11:33.447498 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-n65q2" Nov 25 10:11:33 crc kubenswrapper[4769]: I1125 10:11:33.454268 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 10:11:33 crc kubenswrapper[4769]: I1125 10:11:33.454276 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-skqtv" Nov 25 10:11:33 crc kubenswrapper[4769]: I1125 10:11:33.454653 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:11:33 crc kubenswrapper[4769]: I1125 10:11:33.460994 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 10:11:33 crc kubenswrapper[4769]: I1125 10:11:33.477737 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-n65q2"] Nov 25 10:11:33 crc kubenswrapper[4769]: I1125 10:11:33.523528 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/56107e4b-15d6-4358-ac81-c6bcc8fcc737-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-n65q2\" (UID: \"56107e4b-15d6-4358-ac81-c6bcc8fcc737\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-n65q2" Nov 25 10:11:33 crc kubenswrapper[4769]: I1125 10:11:33.524317 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wxzhh\" (UniqueName: \"kubernetes.io/projected/56107e4b-15d6-4358-ac81-c6bcc8fcc737-kube-api-access-wxzhh\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-n65q2\" (UID: \"56107e4b-15d6-4358-ac81-c6bcc8fcc737\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-n65q2" Nov 25 10:11:33 crc kubenswrapper[4769]: I1125 10:11:33.524377 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/56107e4b-15d6-4358-ac81-c6bcc8fcc737-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-n65q2\" (UID: \"56107e4b-15d6-4358-ac81-c6bcc8fcc737\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-n65q2" Nov 25 10:11:33 crc kubenswrapper[4769]: I1125 10:11:33.524727 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56107e4b-15d6-4358-ac81-c6bcc8fcc737-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-n65q2\" (UID: \"56107e4b-15d6-4358-ac81-c6bcc8fcc737\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-n65q2" Nov 25 10:11:33 crc kubenswrapper[4769]: I1125 10:11:33.627255 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56107e4b-15d6-4358-ac81-c6bcc8fcc737-repo-setup-combined-ca-bundle\") pod 
\"repo-setup-edpm-deployment-openstack-edpm-ipam-n65q2\" (UID: \"56107e4b-15d6-4358-ac81-c6bcc8fcc737\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-n65q2" Nov 25 10:11:33 crc kubenswrapper[4769]: I1125 10:11:33.627396 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/56107e4b-15d6-4358-ac81-c6bcc8fcc737-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-n65q2\" (UID: \"56107e4b-15d6-4358-ac81-c6bcc8fcc737\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-n65q2" Nov 25 10:11:33 crc kubenswrapper[4769]: I1125 10:11:33.627448 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wxzhh\" (UniqueName: \"kubernetes.io/projected/56107e4b-15d6-4358-ac81-c6bcc8fcc737-kube-api-access-wxzhh\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-n65q2\" (UID: \"56107e4b-15d6-4358-ac81-c6bcc8fcc737\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-n65q2" Nov 25 10:11:33 crc kubenswrapper[4769]: I1125 10:11:33.627475 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/56107e4b-15d6-4358-ac81-c6bcc8fcc737-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-n65q2\" (UID: \"56107e4b-15d6-4358-ac81-c6bcc8fcc737\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-n65q2" Nov 25 10:11:33 crc kubenswrapper[4769]: I1125 10:11:33.635830 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/56107e4b-15d6-4358-ac81-c6bcc8fcc737-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-n65q2\" (UID: \"56107e4b-15d6-4358-ac81-c6bcc8fcc737\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-n65q2" Nov 25 10:11:33 crc kubenswrapper[4769]: I1125 10:11:33.638267 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56107e4b-15d6-4358-ac81-c6bcc8fcc737-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-n65q2\" (UID: \"56107e4b-15d6-4358-ac81-c6bcc8fcc737\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-n65q2" Nov 25 10:11:33 crc kubenswrapper[4769]: I1125 10:11:33.645545 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/56107e4b-15d6-4358-ac81-c6bcc8fcc737-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-n65q2\" (UID: \"56107e4b-15d6-4358-ac81-c6bcc8fcc737\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-n65q2" Nov 25 10:11:33 crc kubenswrapper[4769]: I1125 10:11:33.649542 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wxzhh\" (UniqueName: \"kubernetes.io/projected/56107e4b-15d6-4358-ac81-c6bcc8fcc737-kube-api-access-wxzhh\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-n65q2\" (UID: \"56107e4b-15d6-4358-ac81-c6bcc8fcc737\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-n65q2" Nov 25 10:11:33 crc kubenswrapper[4769]: I1125 10:11:33.794914 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-n65q2" Nov 25 10:11:33 crc kubenswrapper[4769]: I1125 10:11:33.977992 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-api-58b7d79fd9-47jn4" podUID="c8bd1d11-b593-4c27-a79e-d49792d851ee" containerName="heat-api" probeResult="failure" output="Get \"https://10.217.0.219:8004/healthcheck\": read tcp 10.217.0.2:54366->10.217.0.219:8004: read: connection reset by peer" Nov 25 10:11:34 crc kubenswrapper[4769]: I1125 10:11:34.527333 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-58b7d79fd9-47jn4" Nov 25 10:11:34 crc kubenswrapper[4769]: I1125 10:11:34.654929 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t7qff\" (UniqueName: \"kubernetes.io/projected/c8bd1d11-b593-4c27-a79e-d49792d851ee-kube-api-access-t7qff\") pod \"c8bd1d11-b593-4c27-a79e-d49792d851ee\" (UID: \"c8bd1d11-b593-4c27-a79e-d49792d851ee\") " Nov 25 10:11:34 crc kubenswrapper[4769]: I1125 10:11:34.655039 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c8bd1d11-b593-4c27-a79e-d49792d851ee-public-tls-certs\") pod \"c8bd1d11-b593-4c27-a79e-d49792d851ee\" (UID: \"c8bd1d11-b593-4c27-a79e-d49792d851ee\") " Nov 25 10:11:34 crc kubenswrapper[4769]: I1125 10:11:34.655075 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c8bd1d11-b593-4c27-a79e-d49792d851ee-internal-tls-certs\") pod \"c8bd1d11-b593-4c27-a79e-d49792d851ee\" (UID: \"c8bd1d11-b593-4c27-a79e-d49792d851ee\") " Nov 25 10:11:34 crc kubenswrapper[4769]: I1125 10:11:34.655165 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c8bd1d11-b593-4c27-a79e-d49792d851ee-config-data-custom\") pod \"c8bd1d11-b593-4c27-a79e-d49792d851ee\" (UID: \"c8bd1d11-b593-4c27-a79e-d49792d851ee\") " Nov 25 10:11:34 crc kubenswrapper[4769]: I1125 10:11:34.655206 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8bd1d11-b593-4c27-a79e-d49792d851ee-config-data\") pod \"c8bd1d11-b593-4c27-a79e-d49792d851ee\" (UID: \"c8bd1d11-b593-4c27-a79e-d49792d851ee\") " Nov 25 10:11:34 crc kubenswrapper[4769]: I1125 10:11:34.655277 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8bd1d11-b593-4c27-a79e-d49792d851ee-combined-ca-bundle\") pod \"c8bd1d11-b593-4c27-a79e-d49792d851ee\" (UID: \"c8bd1d11-b593-4c27-a79e-d49792d851ee\") " Nov 25 10:11:34 crc kubenswrapper[4769]: I1125 10:11:34.661371 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c8bd1d11-b593-4c27-a79e-d49792d851ee-kube-api-access-t7qff" (OuterVolumeSpecName: "kube-api-access-t7qff") pod "c8bd1d11-b593-4c27-a79e-d49792d851ee" (UID: "c8bd1d11-b593-4c27-a79e-d49792d851ee"). InnerVolumeSpecName "kube-api-access-t7qff". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:11:34 crc kubenswrapper[4769]: I1125 10:11:34.667086 4769 generic.go:334] "Generic (PLEG): container finished" podID="c8bd1d11-b593-4c27-a79e-d49792d851ee" containerID="79c2e0d139b7a0949a3bb799a63b670a13c45910d0dad1425c8c980bf82ef178" exitCode=0 Nov 25 10:11:34 crc kubenswrapper[4769]: I1125 10:11:34.667131 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-58b7d79fd9-47jn4" event={"ID":"c8bd1d11-b593-4c27-a79e-d49792d851ee","Type":"ContainerDied","Data":"79c2e0d139b7a0949a3bb799a63b670a13c45910d0dad1425c8c980bf82ef178"} Nov 25 10:11:34 crc kubenswrapper[4769]: I1125 10:11:34.667159 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-58b7d79fd9-47jn4" event={"ID":"c8bd1d11-b593-4c27-a79e-d49792d851ee","Type":"ContainerDied","Data":"8aa1b67da5761c0e354214ffc51afdebe4e113989a68947a8f63b0e803e51f01"} Nov 25 10:11:34 crc kubenswrapper[4769]: I1125 10:11:34.667178 4769 scope.go:117] "RemoveContainer" containerID="79c2e0d139b7a0949a3bb799a63b670a13c45910d0dad1425c8c980bf82ef178" Nov 25 10:11:34 crc kubenswrapper[4769]: I1125 10:11:34.667315 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-58b7d79fd9-47jn4" Nov 25 10:11:34 crc kubenswrapper[4769]: I1125 10:11:34.671165 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8bd1d11-b593-4c27-a79e-d49792d851ee-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "c8bd1d11-b593-4c27-a79e-d49792d851ee" (UID: "c8bd1d11-b593-4c27-a79e-d49792d851ee"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:11:34 crc kubenswrapper[4769]: I1125 10:11:34.699726 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8bd1d11-b593-4c27-a79e-d49792d851ee-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c8bd1d11-b593-4c27-a79e-d49792d851ee" (UID: "c8bd1d11-b593-4c27-a79e-d49792d851ee"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:11:34 crc kubenswrapper[4769]: I1125 10:11:34.739815 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8bd1d11-b593-4c27-a79e-d49792d851ee-config-data" (OuterVolumeSpecName: "config-data") pod "c8bd1d11-b593-4c27-a79e-d49792d851ee" (UID: "c8bd1d11-b593-4c27-a79e-d49792d851ee"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:11:34 crc kubenswrapper[4769]: I1125 10:11:34.750130 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8bd1d11-b593-4c27-a79e-d49792d851ee-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "c8bd1d11-b593-4c27-a79e-d49792d851ee" (UID: "c8bd1d11-b593-4c27-a79e-d49792d851ee"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:11:34 crc kubenswrapper[4769]: I1125 10:11:34.752228 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c8bd1d11-b593-4c27-a79e-d49792d851ee-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "c8bd1d11-b593-4c27-a79e-d49792d851ee" (UID: "c8bd1d11-b593-4c27-a79e-d49792d851ee"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:11:34 crc kubenswrapper[4769]: I1125 10:11:34.760241 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t7qff\" (UniqueName: \"kubernetes.io/projected/c8bd1d11-b593-4c27-a79e-d49792d851ee-kube-api-access-t7qff\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:34 crc kubenswrapper[4769]: I1125 10:11:34.760275 4769 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c8bd1d11-b593-4c27-a79e-d49792d851ee-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:34 crc kubenswrapper[4769]: I1125 10:11:34.760358 4769 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c8bd1d11-b593-4c27-a79e-d49792d851ee-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:34 crc kubenswrapper[4769]: I1125 10:11:34.760393 4769 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c8bd1d11-b593-4c27-a79e-d49792d851ee-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:34 crc kubenswrapper[4769]: I1125 10:11:34.760407 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8bd1d11-b593-4c27-a79e-d49792d851ee-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:34 crc kubenswrapper[4769]: I1125 10:11:34.760417 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8bd1d11-b593-4c27-a79e-d49792d851ee-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:34 crc kubenswrapper[4769]: I1125 10:11:34.769170 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-n65q2"] Nov 25 10:11:34 crc kubenswrapper[4769]: I1125 10:11:34.890900 4769 scope.go:117] "RemoveContainer" containerID="79c2e0d139b7a0949a3bb799a63b670a13c45910d0dad1425c8c980bf82ef178" Nov 25 10:11:34 crc kubenswrapper[4769]: E1125 10:11:34.891946 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"79c2e0d139b7a0949a3bb799a63b670a13c45910d0dad1425c8c980bf82ef178\": container with ID starting with 79c2e0d139b7a0949a3bb799a63b670a13c45910d0dad1425c8c980bf82ef178 not found: ID does not exist" containerID="79c2e0d139b7a0949a3bb799a63b670a13c45910d0dad1425c8c980bf82ef178" Nov 25 10:11:34 crc kubenswrapper[4769]: I1125 10:11:34.892010 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"79c2e0d139b7a0949a3bb799a63b670a13c45910d0dad1425c8c980bf82ef178"} err="failed to get container status \"79c2e0d139b7a0949a3bb799a63b670a13c45910d0dad1425c8c980bf82ef178\": rpc error: code = NotFound desc = could not find container \"79c2e0d139b7a0949a3bb799a63b670a13c45910d0dad1425c8c980bf82ef178\": container with ID starting with 79c2e0d139b7a0949a3bb799a63b670a13c45910d0dad1425c8c980bf82ef178 not found: ID does not exist" Nov 25 10:11:35 crc kubenswrapper[4769]: I1125 10:11:35.021866 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-58b7d79fd9-47jn4"] Nov 25 10:11:35 crc kubenswrapper[4769]: I1125 10:11:35.037230 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-58b7d79fd9-47jn4"] Nov 25 10:11:35 crc kubenswrapper[4769]: I1125 10:11:35.525365 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openstack/heat-cfnapi-55bb67dcdf-bnzxj" Nov 25 10:11:35 crc kubenswrapper[4769]: I1125 10:11:35.601385 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-546cbc7f88-ncbrw"] Nov 25 10:11:35 crc kubenswrapper[4769]: I1125 10:11:35.602055 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-cfnapi-546cbc7f88-ncbrw" podUID="f5031f85-50f1-4ffe-b4c4-8ef90a3084bb" containerName="heat-cfnapi" containerID="cri-o://3fb0a5458cfab3fc19b103fcdc99c45a4e7c256b6503b12bbec50b55947981c3" gracePeriod=60 Nov 25 10:11:35 crc kubenswrapper[4769]: I1125 10:11:35.680999 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-n65q2" event={"ID":"56107e4b-15d6-4358-ac81-c6bcc8fcc737","Type":"ContainerStarted","Data":"ef683106d0a9d37f5cc554cca327f77b606fd1cde2ab2f7c5a80b073ec1473d0"} Nov 25 10:11:36 crc kubenswrapper[4769]: I1125 10:11:36.238001 4769 scope.go:117] "RemoveContainer" containerID="19d7e4719048ead4c6811e0bf09eaeb04f5d9c0b0e2c91bc113c457c1276b7a7" Nov 25 10:11:36 crc kubenswrapper[4769]: E1125 10:11:36.238328 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:11:36 crc kubenswrapper[4769]: I1125 10:11:36.251286 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c8bd1d11-b593-4c27-a79e-d49792d851ee" path="/var/lib/kubelet/pods/c8bd1d11-b593-4c27-a79e-d49792d851ee/volumes" Nov 25 10:11:36 crc kubenswrapper[4769]: I1125 10:11:36.701582 4769 generic.go:334] "Generic (PLEG): container finished" podID="8b992919-0b8a-425d-9e1a-aec914a91965" containerID="028ea56a557e4c6820713b12429ecce3af15b11011b89e23f39469b23bdde59b" exitCode=0 Nov 25 10:11:36 crc kubenswrapper[4769]: I1125 10:11:36.701639 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"8b992919-0b8a-425d-9e1a-aec914a91965","Type":"ContainerDied","Data":"028ea56a557e4c6820713b12429ecce3af15b11011b89e23f39469b23bdde59b"} Nov 25 10:11:36 crc kubenswrapper[4769]: I1125 10:11:36.705338 4769 generic.go:334] "Generic (PLEG): container finished" podID="df64d497-1d94-46ba-b773-da7ade77177a" containerID="cbdd09ffb313dd7711e0e4b55abe9166f501f955a45c64c66acf7679e8500d65" exitCode=0 Nov 25 10:11:36 crc kubenswrapper[4769]: I1125 10:11:36.705401 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"df64d497-1d94-46ba-b773-da7ade77177a","Type":"ContainerDied","Data":"cbdd09ffb313dd7711e0e4b55abe9166f501f955a45c64c66acf7679e8500d65"} Nov 25 10:11:38 crc kubenswrapper[4769]: I1125 10:11:38.733396 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"df64d497-1d94-46ba-b773-da7ade77177a","Type":"ContainerStarted","Data":"df221d6fed777d6995510cbc11024c7b6e5e934be7286e2fd05346f39f440e4b"} Nov 25 10:11:38 crc kubenswrapper[4769]: I1125 10:11:38.734361 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:11:38 crc kubenswrapper[4769]: I1125 10:11:38.738831 4769 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openstack/rabbitmq-server-0" event={"ID":"8b992919-0b8a-425d-9e1a-aec914a91965","Type":"ContainerStarted","Data":"1bef30ea5dd150faefd870c46d8b4827ccd69199e6b16c3318d07b5973851902"} Nov 25 10:11:38 crc kubenswrapper[4769]: I1125 10:11:38.739182 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 25 10:11:38 crc kubenswrapper[4769]: I1125 10:11:38.754244 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-cfnapi-546cbc7f88-ncbrw" podUID="f5031f85-50f1-4ffe-b4c4-8ef90a3084bb" containerName="heat-cfnapi" probeResult="failure" output="Get \"https://10.217.0.218:8000/healthcheck\": read tcp 10.217.0.2:43626->10.217.0.218:8000: read: connection reset by peer" Nov 25 10:11:38 crc kubenswrapper[4769]: I1125 10:11:38.765715 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=38.765691265 podStartE2EDuration="38.765691265s" podCreationTimestamp="2025-11-25 10:11:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:11:38.755016011 +0000 UTC m=+1647.339988334" watchObservedRunningTime="2025-11-25 10:11:38.765691265 +0000 UTC m=+1647.350663568" Nov 25 10:11:38 crc kubenswrapper[4769]: I1125 10:11:38.779299 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=39.779284404 podStartE2EDuration="39.779284404s" podCreationTimestamp="2025-11-25 10:10:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:11:38.778634657 +0000 UTC m=+1647.363606970" watchObservedRunningTime="2025-11-25 10:11:38.779284404 +0000 UTC m=+1647.364256717" Nov 25 10:11:38 crc kubenswrapper[4769]: I1125 10:11:38.813375 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-5479947c5b-lpmd8" Nov 25 10:11:38 crc kubenswrapper[4769]: I1125 10:11:38.895132 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-779556746f-vswpj"] Nov 25 10:11:38 crc kubenswrapper[4769]: I1125 10:11:38.895374 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-engine-779556746f-vswpj" podUID="a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1" containerName="heat-engine" containerID="cri-o://1a40c4ebd0306d941f589b392746c158e1be9e00a1efae92685ee197c68c2d34" gracePeriod=60 Nov 25 10:11:39 crc kubenswrapper[4769]: E1125 10:11:39.663314 4769 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="1a40c4ebd0306d941f589b392746c158e1be9e00a1efae92685ee197c68c2d34" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Nov 25 10:11:39 crc kubenswrapper[4769]: E1125 10:11:39.665195 4769 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="1a40c4ebd0306d941f589b392746c158e1be9e00a1efae92685ee197c68c2d34" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Nov 25 10:11:39 crc kubenswrapper[4769]: E1125 10:11:39.667347 4769 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot 
register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="1a40c4ebd0306d941f589b392746c158e1be9e00a1efae92685ee197c68c2d34" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Nov 25 10:11:39 crc kubenswrapper[4769]: E1125 10:11:39.667421 4769 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/heat-engine-779556746f-vswpj" podUID="a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1" containerName="heat-engine" Nov 25 10:11:39 crc kubenswrapper[4769]: I1125 10:11:39.760001 4769 generic.go:334] "Generic (PLEG): container finished" podID="f5031f85-50f1-4ffe-b4c4-8ef90a3084bb" containerID="3fb0a5458cfab3fc19b103fcdc99c45a4e7c256b6503b12bbec50b55947981c3" exitCode=0 Nov 25 10:11:39 crc kubenswrapper[4769]: I1125 10:11:39.760458 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-546cbc7f88-ncbrw" event={"ID":"f5031f85-50f1-4ffe-b4c4-8ef90a3084bb","Type":"ContainerDied","Data":"3fb0a5458cfab3fc19b103fcdc99c45a4e7c256b6503b12bbec50b55947981c3"} Nov 25 10:11:41 crc kubenswrapper[4769]: I1125 10:11:41.825090 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-sync-gv6vg"] Nov 25 10:11:41 crc kubenswrapper[4769]: I1125 10:11:41.840848 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-sync-gv6vg"] Nov 25 10:11:41 crc kubenswrapper[4769]: I1125 10:11:41.926489 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-sync-7dvlm"] Nov 25 10:11:41 crc kubenswrapper[4769]: E1125 10:11:41.926982 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8bd1d11-b593-4c27-a79e-d49792d851ee" containerName="heat-api" Nov 25 10:11:41 crc kubenswrapper[4769]: I1125 10:11:41.926994 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8bd1d11-b593-4c27-a79e-d49792d851ee" containerName="heat-api" Nov 25 10:11:41 crc kubenswrapper[4769]: I1125 10:11:41.927234 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="c8bd1d11-b593-4c27-a79e-d49792d851ee" containerName="heat-api" Nov 25 10:11:41 crc kubenswrapper[4769]: I1125 10:11:41.927918 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-7dvlm"] Nov 25 10:11:41 crc kubenswrapper[4769]: I1125 10:11:41.928068 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-7dvlm" Nov 25 10:11:41 crc kubenswrapper[4769]: I1125 10:11:41.930954 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 25 10:11:41 crc kubenswrapper[4769]: I1125 10:11:41.975654 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8738db83-202d-4c3c-bed9-2492a4a611c8-config-data\") pod \"aodh-db-sync-7dvlm\" (UID: \"8738db83-202d-4c3c-bed9-2492a4a611c8\") " pod="openstack/aodh-db-sync-7dvlm" Nov 25 10:11:41 crc kubenswrapper[4769]: I1125 10:11:41.975727 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8738db83-202d-4c3c-bed9-2492a4a611c8-combined-ca-bundle\") pod \"aodh-db-sync-7dvlm\" (UID: \"8738db83-202d-4c3c-bed9-2492a4a611c8\") " pod="openstack/aodh-db-sync-7dvlm" Nov 25 10:11:41 crc kubenswrapper[4769]: I1125 10:11:41.975802 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8738db83-202d-4c3c-bed9-2492a4a611c8-scripts\") pod \"aodh-db-sync-7dvlm\" (UID: \"8738db83-202d-4c3c-bed9-2492a4a611c8\") " pod="openstack/aodh-db-sync-7dvlm" Nov 25 10:11:41 crc kubenswrapper[4769]: I1125 10:11:41.975847 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jqrtp\" (UniqueName: \"kubernetes.io/projected/8738db83-202d-4c3c-bed9-2492a4a611c8-kube-api-access-jqrtp\") pod \"aodh-db-sync-7dvlm\" (UID: \"8738db83-202d-4c3c-bed9-2492a4a611c8\") " pod="openstack/aodh-db-sync-7dvlm" Nov 25 10:11:42 crc kubenswrapper[4769]: I1125 10:11:42.077646 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8738db83-202d-4c3c-bed9-2492a4a611c8-scripts\") pod \"aodh-db-sync-7dvlm\" (UID: \"8738db83-202d-4c3c-bed9-2492a4a611c8\") " pod="openstack/aodh-db-sync-7dvlm" Nov 25 10:11:42 crc kubenswrapper[4769]: I1125 10:11:42.077710 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jqrtp\" (UniqueName: \"kubernetes.io/projected/8738db83-202d-4c3c-bed9-2492a4a611c8-kube-api-access-jqrtp\") pod \"aodh-db-sync-7dvlm\" (UID: \"8738db83-202d-4c3c-bed9-2492a4a611c8\") " pod="openstack/aodh-db-sync-7dvlm" Nov 25 10:11:42 crc kubenswrapper[4769]: I1125 10:11:42.077798 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8738db83-202d-4c3c-bed9-2492a4a611c8-config-data\") pod \"aodh-db-sync-7dvlm\" (UID: \"8738db83-202d-4c3c-bed9-2492a4a611c8\") " pod="openstack/aodh-db-sync-7dvlm" Nov 25 10:11:42 crc kubenswrapper[4769]: I1125 10:11:42.077850 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8738db83-202d-4c3c-bed9-2492a4a611c8-combined-ca-bundle\") pod \"aodh-db-sync-7dvlm\" (UID: \"8738db83-202d-4c3c-bed9-2492a4a611c8\") " pod="openstack/aodh-db-sync-7dvlm" Nov 25 10:11:42 crc kubenswrapper[4769]: I1125 10:11:42.084598 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8738db83-202d-4c3c-bed9-2492a4a611c8-scripts\") pod \"aodh-db-sync-7dvlm\" (UID: \"8738db83-202d-4c3c-bed9-2492a4a611c8\") " 
pod="openstack/aodh-db-sync-7dvlm" Nov 25 10:11:42 crc kubenswrapper[4769]: I1125 10:11:42.084767 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8738db83-202d-4c3c-bed9-2492a4a611c8-config-data\") pod \"aodh-db-sync-7dvlm\" (UID: \"8738db83-202d-4c3c-bed9-2492a4a611c8\") " pod="openstack/aodh-db-sync-7dvlm" Nov 25 10:11:42 crc kubenswrapper[4769]: I1125 10:11:42.085394 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8738db83-202d-4c3c-bed9-2492a4a611c8-combined-ca-bundle\") pod \"aodh-db-sync-7dvlm\" (UID: \"8738db83-202d-4c3c-bed9-2492a4a611c8\") " pod="openstack/aodh-db-sync-7dvlm" Nov 25 10:11:42 crc kubenswrapper[4769]: I1125 10:11:42.098362 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jqrtp\" (UniqueName: \"kubernetes.io/projected/8738db83-202d-4c3c-bed9-2492a4a611c8-kube-api-access-jqrtp\") pod \"aodh-db-sync-7dvlm\" (UID: \"8738db83-202d-4c3c-bed9-2492a4a611c8\") " pod="openstack/aodh-db-sync-7dvlm" Nov 25 10:11:42 crc kubenswrapper[4769]: I1125 10:11:42.252300 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb6debff-79c0-45cc-915d-8d40aa9a4b78" path="/var/lib/kubelet/pods/cb6debff-79c0-45cc-915d-8d40aa9a4b78/volumes" Nov 25 10:11:42 crc kubenswrapper[4769]: I1125 10:11:42.271697 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-7dvlm" Nov 25 10:11:43 crc kubenswrapper[4769]: I1125 10:11:43.458245 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-cfnapi-546cbc7f88-ncbrw" podUID="f5031f85-50f1-4ffe-b4c4-8ef90a3084bb" containerName="heat-cfnapi" probeResult="failure" output="Get \"https://10.217.0.218:8000/healthcheck\": dial tcp 10.217.0.218:8000: connect: connection refused" Nov 25 10:11:46 crc kubenswrapper[4769]: I1125 10:11:46.912094 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 25 10:11:48 crc kubenswrapper[4769]: I1125 10:11:48.037000 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-546cbc7f88-ncbrw" Nov 25 10:11:48 crc kubenswrapper[4769]: I1125 10:11:48.202084 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f5031f85-50f1-4ffe-b4c4-8ef90a3084bb-config-data-custom\") pod \"f5031f85-50f1-4ffe-b4c4-8ef90a3084bb\" (UID: \"f5031f85-50f1-4ffe-b4c4-8ef90a3084bb\") " Nov 25 10:11:48 crc kubenswrapper[4769]: I1125 10:11:48.202159 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5031f85-50f1-4ffe-b4c4-8ef90a3084bb-internal-tls-certs\") pod \"f5031f85-50f1-4ffe-b4c4-8ef90a3084bb\" (UID: \"f5031f85-50f1-4ffe-b4c4-8ef90a3084bb\") " Nov 25 10:11:48 crc kubenswrapper[4769]: I1125 10:11:48.202333 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5031f85-50f1-4ffe-b4c4-8ef90a3084bb-public-tls-certs\") pod \"f5031f85-50f1-4ffe-b4c4-8ef90a3084bb\" (UID: \"f5031f85-50f1-4ffe-b4c4-8ef90a3084bb\") " Nov 25 10:11:48 crc kubenswrapper[4769]: I1125 10:11:48.202462 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j5f66\" (UniqueName: \"kubernetes.io/projected/f5031f85-50f1-4ffe-b4c4-8ef90a3084bb-kube-api-access-j5f66\") pod \"f5031f85-50f1-4ffe-b4c4-8ef90a3084bb\" (UID: \"f5031f85-50f1-4ffe-b4c4-8ef90a3084bb\") " Nov 25 10:11:48 crc kubenswrapper[4769]: I1125 10:11:48.202540 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5031f85-50f1-4ffe-b4c4-8ef90a3084bb-config-data\") pod \"f5031f85-50f1-4ffe-b4c4-8ef90a3084bb\" (UID: \"f5031f85-50f1-4ffe-b4c4-8ef90a3084bb\") " Nov 25 10:11:48 crc kubenswrapper[4769]: I1125 10:11:48.202632 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5031f85-50f1-4ffe-b4c4-8ef90a3084bb-combined-ca-bundle\") pod \"f5031f85-50f1-4ffe-b4c4-8ef90a3084bb\" (UID: \"f5031f85-50f1-4ffe-b4c4-8ef90a3084bb\") " Nov 25 10:11:48 crc kubenswrapper[4769]: I1125 10:11:48.211252 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f5031f85-50f1-4ffe-b4c4-8ef90a3084bb-kube-api-access-j5f66" (OuterVolumeSpecName: "kube-api-access-j5f66") pod "f5031f85-50f1-4ffe-b4c4-8ef90a3084bb" (UID: "f5031f85-50f1-4ffe-b4c4-8ef90a3084bb"). InnerVolumeSpecName "kube-api-access-j5f66". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:11:48 crc kubenswrapper[4769]: I1125 10:11:48.211615 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5031f85-50f1-4ffe-b4c4-8ef90a3084bb-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "f5031f85-50f1-4ffe-b4c4-8ef90a3084bb" (UID: "f5031f85-50f1-4ffe-b4c4-8ef90a3084bb"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:11:48 crc kubenswrapper[4769]: I1125 10:11:48.251236 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5031f85-50f1-4ffe-b4c4-8ef90a3084bb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f5031f85-50f1-4ffe-b4c4-8ef90a3084bb" (UID: "f5031f85-50f1-4ffe-b4c4-8ef90a3084bb"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:11:48 crc kubenswrapper[4769]: I1125 10:11:48.304837 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5031f85-50f1-4ffe-b4c4-8ef90a3084bb-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "f5031f85-50f1-4ffe-b4c4-8ef90a3084bb" (UID: "f5031f85-50f1-4ffe-b4c4-8ef90a3084bb"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:11:48 crc kubenswrapper[4769]: I1125 10:11:48.306716 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5031f85-50f1-4ffe-b4c4-8ef90a3084bb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:48 crc kubenswrapper[4769]: I1125 10:11:48.306745 4769 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f5031f85-50f1-4ffe-b4c4-8ef90a3084bb-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:48 crc kubenswrapper[4769]: I1125 10:11:48.306754 4769 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5031f85-50f1-4ffe-b4c4-8ef90a3084bb-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:48 crc kubenswrapper[4769]: I1125 10:11:48.306763 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j5f66\" (UniqueName: \"kubernetes.io/projected/f5031f85-50f1-4ffe-b4c4-8ef90a3084bb-kube-api-access-j5f66\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:48 crc kubenswrapper[4769]: I1125 10:11:48.313576 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5031f85-50f1-4ffe-b4c4-8ef90a3084bb-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "f5031f85-50f1-4ffe-b4c4-8ef90a3084bb" (UID: "f5031f85-50f1-4ffe-b4c4-8ef90a3084bb"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:11:48 crc kubenswrapper[4769]: I1125 10:11:48.341134 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5031f85-50f1-4ffe-b4c4-8ef90a3084bb-config-data" (OuterVolumeSpecName: "config-data") pod "f5031f85-50f1-4ffe-b4c4-8ef90a3084bb" (UID: "f5031f85-50f1-4ffe-b4c4-8ef90a3084bb"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:11:48 crc kubenswrapper[4769]: I1125 10:11:48.360065 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-7dvlm"] Nov 25 10:11:48 crc kubenswrapper[4769]: W1125 10:11:48.374913 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8738db83_202d_4c3c_bed9_2492a4a611c8.slice/crio-8ef0e4a6e1d52f1daec91729e118eb5c39613a5c7006d668e754488e3748466b WatchSource:0}: Error finding container 8ef0e4a6e1d52f1daec91729e118eb5c39613a5c7006d668e754488e3748466b: Status 404 returned error can't find the container with id 8ef0e4a6e1d52f1daec91729e118eb5c39613a5c7006d668e754488e3748466b Nov 25 10:11:48 crc kubenswrapper[4769]: I1125 10:11:48.410228 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5031f85-50f1-4ffe-b4c4-8ef90a3084bb-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:48 crc kubenswrapper[4769]: I1125 10:11:48.410274 4769 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5031f85-50f1-4ffe-b4c4-8ef90a3084bb-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:48 crc kubenswrapper[4769]: I1125 10:11:48.931127 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-546cbc7f88-ncbrw" event={"ID":"f5031f85-50f1-4ffe-b4c4-8ef90a3084bb","Type":"ContainerDied","Data":"af1e91f2c453e5bf782442bbc99a6d59c43e54878bd360311330cc1e9c7a50f6"} Nov 25 10:11:48 crc kubenswrapper[4769]: I1125 10:11:48.931561 4769 scope.go:117] "RemoveContainer" containerID="3fb0a5458cfab3fc19b103fcdc99c45a4e7c256b6503b12bbec50b55947981c3" Nov 25 10:11:48 crc kubenswrapper[4769]: I1125 10:11:48.934072 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-546cbc7f88-ncbrw" Nov 25 10:11:48 crc kubenswrapper[4769]: I1125 10:11:48.950236 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-7dvlm" event={"ID":"8738db83-202d-4c3c-bed9-2492a4a611c8","Type":"ContainerStarted","Data":"8ef0e4a6e1d52f1daec91729e118eb5c39613a5c7006d668e754488e3748466b"} Nov 25 10:11:48 crc kubenswrapper[4769]: I1125 10:11:48.962672 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-n65q2" event={"ID":"56107e4b-15d6-4358-ac81-c6bcc8fcc737","Type":"ContainerStarted","Data":"8da3af08ca5cc03bd9d6557b5c88c8601c6752e9f06f12adfe6c39945cd1899a"} Nov 25 10:11:48 crc kubenswrapper[4769]: I1125 10:11:48.981954 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-546cbc7f88-ncbrw"] Nov 25 10:11:48 crc kubenswrapper[4769]: I1125 10:11:48.998110 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-546cbc7f88-ncbrw"] Nov 25 10:11:49 crc kubenswrapper[4769]: I1125 10:11:49.012190 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-n65q2" podStartSLOduration=3.087375155 podStartE2EDuration="16.01216151s" podCreationTimestamp="2025-11-25 10:11:33 +0000 UTC" firstStartedPulling="2025-11-25 10:11:34.774000118 +0000 UTC m=+1643.358972441" lastFinishedPulling="2025-11-25 10:11:47.698786493 +0000 UTC m=+1656.283758796" observedRunningTime="2025-11-25 10:11:48.984330396 +0000 UTC m=+1657.569302709" watchObservedRunningTime="2025-11-25 10:11:49.01216151 +0000 UTC m=+1657.597133813" Nov 25 10:11:49 crc kubenswrapper[4769]: E1125 10:11:49.664162 4769 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="1a40c4ebd0306d941f589b392746c158e1be9e00a1efae92685ee197c68c2d34" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Nov 25 10:11:49 crc kubenswrapper[4769]: E1125 10:11:49.669341 4769 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="1a40c4ebd0306d941f589b392746c158e1be9e00a1efae92685ee197c68c2d34" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Nov 25 10:11:49 crc kubenswrapper[4769]: E1125 10:11:49.684396 4769 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="1a40c4ebd0306d941f589b392746c158e1be9e00a1efae92685ee197c68c2d34" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Nov 25 10:11:49 crc kubenswrapper[4769]: E1125 10:11:49.684490 4769 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/heat-engine-779556746f-vswpj" podUID="a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1" containerName="heat-engine" Nov 25 10:11:50 crc kubenswrapper[4769]: I1125 10:11:50.256622 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f5031f85-50f1-4ffe-b4c4-8ef90a3084bb" path="/var/lib/kubelet/pods/f5031f85-50f1-4ffe-b4c4-8ef90a3084bb/volumes" Nov 25 10:11:50 crc kubenswrapper[4769]: I1125 10:11:50.413683 4769 
prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="8b992919-0b8a-425d-9e1a-aec914a91965" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.1.10:5671: connect: connection refused" Nov 25 10:11:50 crc kubenswrapper[4769]: I1125 10:11:50.439155 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="df64d497-1d94-46ba-b773-da7ade77177a" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.1.11:5671: connect: connection refused" Nov 25 10:11:51 crc kubenswrapper[4769]: I1125 10:11:51.237232 4769 scope.go:117] "RemoveContainer" containerID="19d7e4719048ead4c6811e0bf09eaeb04f5d9c0b0e2c91bc113c457c1276b7a7" Nov 25 10:11:51 crc kubenswrapper[4769]: E1125 10:11:51.237616 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:11:54 crc kubenswrapper[4769]: I1125 10:11:54.051459 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-7dvlm" event={"ID":"8738db83-202d-4c3c-bed9-2492a4a611c8","Type":"ContainerStarted","Data":"d522547551044cba146372aa9d9dcd78b1746a080a69d63ba20931ef2cd04269"} Nov 25 10:11:54 crc kubenswrapper[4769]: I1125 10:11:54.090998 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-sync-7dvlm" podStartSLOduration=7.955133118 podStartE2EDuration="13.090951233s" podCreationTimestamp="2025-11-25 10:11:41 +0000 UTC" firstStartedPulling="2025-11-25 10:11:48.377753223 +0000 UTC m=+1656.962725536" lastFinishedPulling="2025-11-25 10:11:53.513571318 +0000 UTC m=+1662.098543651" observedRunningTime="2025-11-25 10:11:54.079347796 +0000 UTC m=+1662.664320149" watchObservedRunningTime="2025-11-25 10:11:54.090951233 +0000 UTC m=+1662.675923566" Nov 25 10:11:57 crc kubenswrapper[4769]: I1125 10:11:57.104579 4769 generic.go:334] "Generic (PLEG): container finished" podID="a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1" containerID="1a40c4ebd0306d941f589b392746c158e1be9e00a1efae92685ee197c68c2d34" exitCode=0 Nov 25 10:11:57 crc kubenswrapper[4769]: I1125 10:11:57.104825 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-779556746f-vswpj" event={"ID":"a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1","Type":"ContainerDied","Data":"1a40c4ebd0306d941f589b392746c158e1be9e00a1efae92685ee197c68c2d34"} Nov 25 10:11:57 crc kubenswrapper[4769]: I1125 10:11:57.113261 4769 generic.go:334] "Generic (PLEG): container finished" podID="8738db83-202d-4c3c-bed9-2492a4a611c8" containerID="d522547551044cba146372aa9d9dcd78b1746a080a69d63ba20931ef2cd04269" exitCode=0 Nov 25 10:11:57 crc kubenswrapper[4769]: I1125 10:11:57.113310 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-7dvlm" event={"ID":"8738db83-202d-4c3c-bed9-2492a4a611c8","Type":"ContainerDied","Data":"d522547551044cba146372aa9d9dcd78b1746a080a69d63ba20931ef2cd04269"} Nov 25 10:11:57 crc kubenswrapper[4769]: I1125 10:11:57.484344 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-779556746f-vswpj" Nov 25 10:11:57 crc kubenswrapper[4769]: I1125 10:11:57.510563 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1-combined-ca-bundle\") pod \"a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1\" (UID: \"a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1\") " Nov 25 10:11:57 crc kubenswrapper[4769]: I1125 10:11:57.511019 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1-config-data\") pod \"a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1\" (UID: \"a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1\") " Nov 25 10:11:57 crc kubenswrapper[4769]: I1125 10:11:57.514557 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q2pgf\" (UniqueName: \"kubernetes.io/projected/a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1-kube-api-access-q2pgf\") pod \"a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1\" (UID: \"a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1\") " Nov 25 10:11:57 crc kubenswrapper[4769]: I1125 10:11:57.514868 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1-config-data-custom\") pod \"a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1\" (UID: \"a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1\") " Nov 25 10:11:57 crc kubenswrapper[4769]: I1125 10:11:57.521777 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1" (UID: "a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:11:57 crc kubenswrapper[4769]: I1125 10:11:57.521999 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1-kube-api-access-q2pgf" (OuterVolumeSpecName: "kube-api-access-q2pgf") pod "a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1" (UID: "a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1"). InnerVolumeSpecName "kube-api-access-q2pgf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:11:57 crc kubenswrapper[4769]: I1125 10:11:57.595077 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1" (UID: "a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:11:57 crc kubenswrapper[4769]: I1125 10:11:57.610559 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1-config-data" (OuterVolumeSpecName: "config-data") pod "a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1" (UID: "a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:11:57 crc kubenswrapper[4769]: I1125 10:11:57.620496 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:57 crc kubenswrapper[4769]: I1125 10:11:57.620559 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q2pgf\" (UniqueName: \"kubernetes.io/projected/a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1-kube-api-access-q2pgf\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:57 crc kubenswrapper[4769]: I1125 10:11:57.620597 4769 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:57 crc kubenswrapper[4769]: I1125 10:11:57.620708 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:58 crc kubenswrapper[4769]: I1125 10:11:58.139708 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-779556746f-vswpj" event={"ID":"a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1","Type":"ContainerDied","Data":"77bc38c50c041efab48a21c7251e400129b3d34a897027caeb3470acfe292b6f"} Nov 25 10:11:58 crc kubenswrapper[4769]: I1125 10:11:58.140399 4769 scope.go:117] "RemoveContainer" containerID="1a40c4ebd0306d941f589b392746c158e1be9e00a1efae92685ee197c68c2d34" Nov 25 10:11:58 crc kubenswrapper[4769]: I1125 10:11:58.139738 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-779556746f-vswpj" Nov 25 10:11:58 crc kubenswrapper[4769]: I1125 10:11:58.193260 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-779556746f-vswpj"] Nov 25 10:11:58 crc kubenswrapper[4769]: I1125 10:11:58.204189 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-engine-779556746f-vswpj"] Nov 25 10:11:58 crc kubenswrapper[4769]: I1125 10:11:58.282261 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1" path="/var/lib/kubelet/pods/a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1/volumes" Nov 25 10:11:58 crc kubenswrapper[4769]: I1125 10:11:58.693866 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-7dvlm" Nov 25 10:11:58 crc kubenswrapper[4769]: I1125 10:11:58.781922 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8738db83-202d-4c3c-bed9-2492a4a611c8-combined-ca-bundle\") pod \"8738db83-202d-4c3c-bed9-2492a4a611c8\" (UID: \"8738db83-202d-4c3c-bed9-2492a4a611c8\") " Nov 25 10:11:58 crc kubenswrapper[4769]: I1125 10:11:58.782008 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jqrtp\" (UniqueName: \"kubernetes.io/projected/8738db83-202d-4c3c-bed9-2492a4a611c8-kube-api-access-jqrtp\") pod \"8738db83-202d-4c3c-bed9-2492a4a611c8\" (UID: \"8738db83-202d-4c3c-bed9-2492a4a611c8\") " Nov 25 10:11:58 crc kubenswrapper[4769]: I1125 10:11:58.782181 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8738db83-202d-4c3c-bed9-2492a4a611c8-scripts\") pod \"8738db83-202d-4c3c-bed9-2492a4a611c8\" (UID: \"8738db83-202d-4c3c-bed9-2492a4a611c8\") " Nov 25 10:11:58 crc kubenswrapper[4769]: I1125 10:11:58.782714 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8738db83-202d-4c3c-bed9-2492a4a611c8-config-data\") pod \"8738db83-202d-4c3c-bed9-2492a4a611c8\" (UID: \"8738db83-202d-4c3c-bed9-2492a4a611c8\") " Nov 25 10:11:58 crc kubenswrapper[4769]: I1125 10:11:58.791015 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8738db83-202d-4c3c-bed9-2492a4a611c8-kube-api-access-jqrtp" (OuterVolumeSpecName: "kube-api-access-jqrtp") pod "8738db83-202d-4c3c-bed9-2492a4a611c8" (UID: "8738db83-202d-4c3c-bed9-2492a4a611c8"). InnerVolumeSpecName "kube-api-access-jqrtp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:11:58 crc kubenswrapper[4769]: I1125 10:11:58.792416 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8738db83-202d-4c3c-bed9-2492a4a611c8-scripts" (OuterVolumeSpecName: "scripts") pod "8738db83-202d-4c3c-bed9-2492a4a611c8" (UID: "8738db83-202d-4c3c-bed9-2492a4a611c8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:11:58 crc kubenswrapper[4769]: I1125 10:11:58.835124 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8738db83-202d-4c3c-bed9-2492a4a611c8-config-data" (OuterVolumeSpecName: "config-data") pod "8738db83-202d-4c3c-bed9-2492a4a611c8" (UID: "8738db83-202d-4c3c-bed9-2492a4a611c8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:11:58 crc kubenswrapper[4769]: I1125 10:11:58.843218 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8738db83-202d-4c3c-bed9-2492a4a611c8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8738db83-202d-4c3c-bed9-2492a4a611c8" (UID: "8738db83-202d-4c3c-bed9-2492a4a611c8"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:11:58 crc kubenswrapper[4769]: I1125 10:11:58.885879 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8738db83-202d-4c3c-bed9-2492a4a611c8-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:58 crc kubenswrapper[4769]: I1125 10:11:58.885923 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8738db83-202d-4c3c-bed9-2492a4a611c8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:58 crc kubenswrapper[4769]: I1125 10:11:58.885941 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jqrtp\" (UniqueName: \"kubernetes.io/projected/8738db83-202d-4c3c-bed9-2492a4a611c8-kube-api-access-jqrtp\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:58 crc kubenswrapper[4769]: I1125 10:11:58.885954 4769 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8738db83-202d-4c3c-bed9-2492a4a611c8-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:59 crc kubenswrapper[4769]: I1125 10:11:59.171485 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-7dvlm" event={"ID":"8738db83-202d-4c3c-bed9-2492a4a611c8","Type":"ContainerDied","Data":"8ef0e4a6e1d52f1daec91729e118eb5c39613a5c7006d668e754488e3748466b"} Nov 25 10:11:59 crc kubenswrapper[4769]: I1125 10:11:59.171564 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8ef0e4a6e1d52f1daec91729e118eb5c39613a5c7006d668e754488e3748466b" Nov 25 10:11:59 crc kubenswrapper[4769]: I1125 10:11:59.171534 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-7dvlm" Nov 25 10:12:00 crc kubenswrapper[4769]: I1125 10:12:00.187804 4769 generic.go:334] "Generic (PLEG): container finished" podID="56107e4b-15d6-4358-ac81-c6bcc8fcc737" containerID="8da3af08ca5cc03bd9d6557b5c88c8601c6752e9f06f12adfe6c39945cd1899a" exitCode=0 Nov 25 10:12:00 crc kubenswrapper[4769]: I1125 10:12:00.187881 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-n65q2" event={"ID":"56107e4b-15d6-4358-ac81-c6bcc8fcc737","Type":"ContainerDied","Data":"8da3af08ca5cc03bd9d6557b5c88c8601c6752e9f06f12adfe6c39945cd1899a"} Nov 25 10:12:00 crc kubenswrapper[4769]: I1125 10:12:00.412546 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 25 10:12:00 crc kubenswrapper[4769]: I1125 10:12:00.437869 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:12:00 crc kubenswrapper[4769]: I1125 10:12:00.745890 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Nov 25 10:12:00 crc kubenswrapper[4769]: I1125 10:12:00.747224 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="29183bb3-0d2f-4df7-9729-d2bdfab57b2c" containerName="aodh-api" containerID="cri-o://29eae928ea173d70ad89a06b7fb8ad34eca62e515265faff808ffcb27ec1f050" gracePeriod=30 Nov 25 10:12:00 crc kubenswrapper[4769]: I1125 10:12:00.747933 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="29183bb3-0d2f-4df7-9729-d2bdfab57b2c" containerName="aodh-listener" 
containerID="cri-o://5fb4214a2ed4194827bb171987628770bfa552b3f7d1647cc72d207a804189d8" gracePeriod=30 Nov 25 10:12:00 crc kubenswrapper[4769]: I1125 10:12:00.748029 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="29183bb3-0d2f-4df7-9729-d2bdfab57b2c" containerName="aodh-notifier" containerID="cri-o://c98ff24127309bebad1aa4b10d2660d067b86d3217f6a95b3b75bd9fea7beacc" gracePeriod=30 Nov 25 10:12:00 crc kubenswrapper[4769]: I1125 10:12:00.748076 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="29183bb3-0d2f-4df7-9729-d2bdfab57b2c" containerName="aodh-evaluator" containerID="cri-o://959f3ad6d6b0d13571597c4277a2ebb0622005e5176fc5d857747007f1c19506" gracePeriod=30 Nov 25 10:12:01 crc kubenswrapper[4769]: I1125 10:12:01.726019 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-n65q2" Nov 25 10:12:01 crc kubenswrapper[4769]: I1125 10:12:01.895364 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/56107e4b-15d6-4358-ac81-c6bcc8fcc737-inventory\") pod \"56107e4b-15d6-4358-ac81-c6bcc8fcc737\" (UID: \"56107e4b-15d6-4358-ac81-c6bcc8fcc737\") " Nov 25 10:12:01 crc kubenswrapper[4769]: I1125 10:12:01.895920 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/56107e4b-15d6-4358-ac81-c6bcc8fcc737-ssh-key\") pod \"56107e4b-15d6-4358-ac81-c6bcc8fcc737\" (UID: \"56107e4b-15d6-4358-ac81-c6bcc8fcc737\") " Nov 25 10:12:01 crc kubenswrapper[4769]: I1125 10:12:01.896310 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxzhh\" (UniqueName: \"kubernetes.io/projected/56107e4b-15d6-4358-ac81-c6bcc8fcc737-kube-api-access-wxzhh\") pod \"56107e4b-15d6-4358-ac81-c6bcc8fcc737\" (UID: \"56107e4b-15d6-4358-ac81-c6bcc8fcc737\") " Nov 25 10:12:01 crc kubenswrapper[4769]: I1125 10:12:01.896526 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56107e4b-15d6-4358-ac81-c6bcc8fcc737-repo-setup-combined-ca-bundle\") pod \"56107e4b-15d6-4358-ac81-c6bcc8fcc737\" (UID: \"56107e4b-15d6-4358-ac81-c6bcc8fcc737\") " Nov 25 10:12:01 crc kubenswrapper[4769]: I1125 10:12:01.904935 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/56107e4b-15d6-4358-ac81-c6bcc8fcc737-kube-api-access-wxzhh" (OuterVolumeSpecName: "kube-api-access-wxzhh") pod "56107e4b-15d6-4358-ac81-c6bcc8fcc737" (UID: "56107e4b-15d6-4358-ac81-c6bcc8fcc737"). InnerVolumeSpecName "kube-api-access-wxzhh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:12:01 crc kubenswrapper[4769]: I1125 10:12:01.915622 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56107e4b-15d6-4358-ac81-c6bcc8fcc737-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "56107e4b-15d6-4358-ac81-c6bcc8fcc737" (UID: "56107e4b-15d6-4358-ac81-c6bcc8fcc737"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:12:01 crc kubenswrapper[4769]: I1125 10:12:01.941273 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56107e4b-15d6-4358-ac81-c6bcc8fcc737-inventory" (OuterVolumeSpecName: "inventory") pod "56107e4b-15d6-4358-ac81-c6bcc8fcc737" (UID: "56107e4b-15d6-4358-ac81-c6bcc8fcc737"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:12:01 crc kubenswrapper[4769]: I1125 10:12:01.945727 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56107e4b-15d6-4358-ac81-c6bcc8fcc737-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "56107e4b-15d6-4358-ac81-c6bcc8fcc737" (UID: "56107e4b-15d6-4358-ac81-c6bcc8fcc737"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:12:01 crc kubenswrapper[4769]: I1125 10:12:01.999253 4769 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/56107e4b-15d6-4358-ac81-c6bcc8fcc737-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:12:01 crc kubenswrapper[4769]: I1125 10:12:01.999305 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxzhh\" (UniqueName: \"kubernetes.io/projected/56107e4b-15d6-4358-ac81-c6bcc8fcc737-kube-api-access-wxzhh\") on node \"crc\" DevicePath \"\"" Nov 25 10:12:01 crc kubenswrapper[4769]: I1125 10:12:01.999320 4769 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56107e4b-15d6-4358-ac81-c6bcc8fcc737-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:12:01 crc kubenswrapper[4769]: I1125 10:12:01.999330 4769 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/56107e4b-15d6-4358-ac81-c6bcc8fcc737-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 10:12:02 crc kubenswrapper[4769]: I1125 10:12:02.239001 4769 generic.go:334] "Generic (PLEG): container finished" podID="29183bb3-0d2f-4df7-9729-d2bdfab57b2c" containerID="959f3ad6d6b0d13571597c4277a2ebb0622005e5176fc5d857747007f1c19506" exitCode=0 Nov 25 10:12:02 crc kubenswrapper[4769]: I1125 10:12:02.239333 4769 generic.go:334] "Generic (PLEG): container finished" podID="29183bb3-0d2f-4df7-9729-d2bdfab57b2c" containerID="29eae928ea173d70ad89a06b7fb8ad34eca62e515265faff808ffcb27ec1f050" exitCode=0 Nov 25 10:12:02 crc kubenswrapper[4769]: I1125 10:12:02.257913 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-n65q2" Nov 25 10:12:02 crc kubenswrapper[4769]: I1125 10:12:02.270088 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-n65q2" event={"ID":"56107e4b-15d6-4358-ac81-c6bcc8fcc737","Type":"ContainerDied","Data":"ef683106d0a9d37f5cc554cca327f77b606fd1cde2ab2f7c5a80b073ec1473d0"} Nov 25 10:12:02 crc kubenswrapper[4769]: I1125 10:12:02.270147 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ef683106d0a9d37f5cc554cca327f77b606fd1cde2ab2f7c5a80b073ec1473d0" Nov 25 10:12:02 crc kubenswrapper[4769]: I1125 10:12:02.270168 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"29183bb3-0d2f-4df7-9729-d2bdfab57b2c","Type":"ContainerDied","Data":"959f3ad6d6b0d13571597c4277a2ebb0622005e5176fc5d857747007f1c19506"} Nov 25 10:12:02 crc kubenswrapper[4769]: I1125 10:12:02.270191 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"29183bb3-0d2f-4df7-9729-d2bdfab57b2c","Type":"ContainerDied","Data":"29eae928ea173d70ad89a06b7fb8ad34eca62e515265faff808ffcb27ec1f050"} Nov 25 10:12:02 crc kubenswrapper[4769]: I1125 10:12:02.318029 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-ndp8l"] Nov 25 10:12:02 crc kubenswrapper[4769]: E1125 10:12:02.318695 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="56107e4b-15d6-4358-ac81-c6bcc8fcc737" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 25 10:12:02 crc kubenswrapper[4769]: I1125 10:12:02.318717 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="56107e4b-15d6-4358-ac81-c6bcc8fcc737" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 25 10:12:02 crc kubenswrapper[4769]: E1125 10:12:02.318756 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5031f85-50f1-4ffe-b4c4-8ef90a3084bb" containerName="heat-cfnapi" Nov 25 10:12:02 crc kubenswrapper[4769]: I1125 10:12:02.318765 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5031f85-50f1-4ffe-b4c4-8ef90a3084bb" containerName="heat-cfnapi" Nov 25 10:12:02 crc kubenswrapper[4769]: E1125 10:12:02.318787 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8738db83-202d-4c3c-bed9-2492a4a611c8" containerName="aodh-db-sync" Nov 25 10:12:02 crc kubenswrapper[4769]: I1125 10:12:02.318796 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="8738db83-202d-4c3c-bed9-2492a4a611c8" containerName="aodh-db-sync" Nov 25 10:12:02 crc kubenswrapper[4769]: E1125 10:12:02.318831 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1" containerName="heat-engine" Nov 25 10:12:02 crc kubenswrapper[4769]: I1125 10:12:02.318839 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1" containerName="heat-engine" Nov 25 10:12:02 crc kubenswrapper[4769]: I1125 10:12:02.319148 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="56107e4b-15d6-4358-ac81-c6bcc8fcc737" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 25 10:12:02 crc kubenswrapper[4769]: I1125 10:12:02.319198 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="8738db83-202d-4c3c-bed9-2492a4a611c8" containerName="aodh-db-sync" Nov 25 10:12:02 crc kubenswrapper[4769]: I1125 10:12:02.319229 4769 
memory_manager.go:354] "RemoveStaleState removing state" podUID="a0cdf2e4-7d2d-47e3-b79b-4dfd3cbb9cb1" containerName="heat-engine" Nov 25 10:12:02 crc kubenswrapper[4769]: I1125 10:12:02.319267 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5031f85-50f1-4ffe-b4c4-8ef90a3084bb" containerName="heat-cfnapi" Nov 25 10:12:02 crc kubenswrapper[4769]: I1125 10:12:02.320340 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-ndp8l" Nov 25 10:12:02 crc kubenswrapper[4769]: I1125 10:12:02.323744 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 10:12:02 crc kubenswrapper[4769]: I1125 10:12:02.323832 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:12:02 crc kubenswrapper[4769]: I1125 10:12:02.324017 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 10:12:02 crc kubenswrapper[4769]: I1125 10:12:02.324339 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-skqtv" Nov 25 10:12:02 crc kubenswrapper[4769]: I1125 10:12:02.352453 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-ndp8l"] Nov 25 10:12:02 crc kubenswrapper[4769]: I1125 10:12:02.511209 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/080006ea-2bcf-4a74-9daa-6e11cf96a8e2-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-ndp8l\" (UID: \"080006ea-2bcf-4a74-9daa-6e11cf96a8e2\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-ndp8l" Nov 25 10:12:02 crc kubenswrapper[4769]: I1125 10:12:02.512149 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/080006ea-2bcf-4a74-9daa-6e11cf96a8e2-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-ndp8l\" (UID: \"080006ea-2bcf-4a74-9daa-6e11cf96a8e2\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-ndp8l" Nov 25 10:12:02 crc kubenswrapper[4769]: I1125 10:12:02.512255 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wzkzg\" (UniqueName: \"kubernetes.io/projected/080006ea-2bcf-4a74-9daa-6e11cf96a8e2-kube-api-access-wzkzg\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-ndp8l\" (UID: \"080006ea-2bcf-4a74-9daa-6e11cf96a8e2\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-ndp8l" Nov 25 10:12:02 crc kubenswrapper[4769]: I1125 10:12:02.616543 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/080006ea-2bcf-4a74-9daa-6e11cf96a8e2-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-ndp8l\" (UID: \"080006ea-2bcf-4a74-9daa-6e11cf96a8e2\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-ndp8l" Nov 25 10:12:02 crc kubenswrapper[4769]: I1125 10:12:02.616615 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wzkzg\" (UniqueName: \"kubernetes.io/projected/080006ea-2bcf-4a74-9daa-6e11cf96a8e2-kube-api-access-wzkzg\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-ndp8l\" (UID: 
\"080006ea-2bcf-4a74-9daa-6e11cf96a8e2\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-ndp8l" Nov 25 10:12:02 crc kubenswrapper[4769]: I1125 10:12:02.616733 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/080006ea-2bcf-4a74-9daa-6e11cf96a8e2-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-ndp8l\" (UID: \"080006ea-2bcf-4a74-9daa-6e11cf96a8e2\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-ndp8l" Nov 25 10:12:02 crc kubenswrapper[4769]: I1125 10:12:02.621636 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/080006ea-2bcf-4a74-9daa-6e11cf96a8e2-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-ndp8l\" (UID: \"080006ea-2bcf-4a74-9daa-6e11cf96a8e2\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-ndp8l" Nov 25 10:12:02 crc kubenswrapper[4769]: I1125 10:12:02.629813 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/080006ea-2bcf-4a74-9daa-6e11cf96a8e2-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-ndp8l\" (UID: \"080006ea-2bcf-4a74-9daa-6e11cf96a8e2\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-ndp8l" Nov 25 10:12:02 crc kubenswrapper[4769]: I1125 10:12:02.651676 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wzkzg\" (UniqueName: \"kubernetes.io/projected/080006ea-2bcf-4a74-9daa-6e11cf96a8e2-kube-api-access-wzkzg\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-ndp8l\" (UID: \"080006ea-2bcf-4a74-9daa-6e11cf96a8e2\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-ndp8l" Nov 25 10:12:02 crc kubenswrapper[4769]: I1125 10:12:02.669851 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-ndp8l" Nov 25 10:12:03 crc kubenswrapper[4769]: I1125 10:12:03.318293 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-ndp8l"] Nov 25 10:12:04 crc kubenswrapper[4769]: I1125 10:12:04.280489 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-ndp8l" event={"ID":"080006ea-2bcf-4a74-9daa-6e11cf96a8e2","Type":"ContainerStarted","Data":"e41c991e59787c92b06953ca76e951b5e70cb0df262a572be87ea41af161ebab"} Nov 25 10:12:04 crc kubenswrapper[4769]: I1125 10:12:04.280832 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-ndp8l" event={"ID":"080006ea-2bcf-4a74-9daa-6e11cf96a8e2","Type":"ContainerStarted","Data":"f636e1766ef3d53ad6e32336acd2c075dba76ee8f324e5cb947dafac64f24f8e"} Nov 25 10:12:04 crc kubenswrapper[4769]: I1125 10:12:04.310044 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-ndp8l" podStartSLOduration=1.842921778 podStartE2EDuration="2.310024326s" podCreationTimestamp="2025-11-25 10:12:02 +0000 UTC" firstStartedPulling="2025-11-25 10:12:03.323785246 +0000 UTC m=+1671.908757559" lastFinishedPulling="2025-11-25 10:12:03.790887794 +0000 UTC m=+1672.375860107" observedRunningTime="2025-11-25 10:12:04.301319433 +0000 UTC m=+1672.886291746" watchObservedRunningTime="2025-11-25 10:12:04.310024326 +0000 UTC m=+1672.894996639" Nov 25 10:12:05 crc kubenswrapper[4769]: I1125 10:12:05.238217 4769 scope.go:117] "RemoveContainer" containerID="19d7e4719048ead4c6811e0bf09eaeb04f5d9c0b0e2c91bc113c457c1276b7a7" Nov 25 10:12:05 crc kubenswrapper[4769]: E1125 10:12:05.239074 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:12:06 crc kubenswrapper[4769]: I1125 10:12:06.335797 4769 generic.go:334] "Generic (PLEG): container finished" podID="29183bb3-0d2f-4df7-9729-d2bdfab57b2c" containerID="5fb4214a2ed4194827bb171987628770bfa552b3f7d1647cc72d207a804189d8" exitCode=0 Nov 25 10:12:06 crc kubenswrapper[4769]: I1125 10:12:06.336617 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"29183bb3-0d2f-4df7-9729-d2bdfab57b2c","Type":"ContainerDied","Data":"5fb4214a2ed4194827bb171987628770bfa552b3f7d1647cc72d207a804189d8"} Nov 25 10:12:07 crc kubenswrapper[4769]: I1125 10:12:07.357023 4769 generic.go:334] "Generic (PLEG): container finished" podID="080006ea-2bcf-4a74-9daa-6e11cf96a8e2" containerID="e41c991e59787c92b06953ca76e951b5e70cb0df262a572be87ea41af161ebab" exitCode=0 Nov 25 10:12:07 crc kubenswrapper[4769]: I1125 10:12:07.357128 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-ndp8l" event={"ID":"080006ea-2bcf-4a74-9daa-6e11cf96a8e2","Type":"ContainerDied","Data":"e41c991e59787c92b06953ca76e951b5e70cb0df262a572be87ea41af161ebab"} Nov 25 10:12:07 crc kubenswrapper[4769]: I1125 10:12:07.799712 4769 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/redhat-operators-tm6zn"] Nov 25 10:12:07 crc kubenswrapper[4769]: I1125 10:12:07.808280 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tm6zn" Nov 25 10:12:07 crc kubenswrapper[4769]: I1125 10:12:07.818988 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tm6zn"] Nov 25 10:12:07 crc kubenswrapper[4769]: I1125 10:12:07.998637 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6d186303-14bd-4046-aefc-0eece7338db6-utilities\") pod \"redhat-operators-tm6zn\" (UID: \"6d186303-14bd-4046-aefc-0eece7338db6\") " pod="openshift-marketplace/redhat-operators-tm6zn" Nov 25 10:12:07 crc kubenswrapper[4769]: I1125 10:12:07.998757 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6dn4f\" (UniqueName: \"kubernetes.io/projected/6d186303-14bd-4046-aefc-0eece7338db6-kube-api-access-6dn4f\") pod \"redhat-operators-tm6zn\" (UID: \"6d186303-14bd-4046-aefc-0eece7338db6\") " pod="openshift-marketplace/redhat-operators-tm6zn" Nov 25 10:12:07 crc kubenswrapper[4769]: I1125 10:12:07.998853 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6d186303-14bd-4046-aefc-0eece7338db6-catalog-content\") pod \"redhat-operators-tm6zn\" (UID: \"6d186303-14bd-4046-aefc-0eece7338db6\") " pod="openshift-marketplace/redhat-operators-tm6zn" Nov 25 10:12:08 crc kubenswrapper[4769]: I1125 10:12:08.102197 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6d186303-14bd-4046-aefc-0eece7338db6-utilities\") pod \"redhat-operators-tm6zn\" (UID: \"6d186303-14bd-4046-aefc-0eece7338db6\") " pod="openshift-marketplace/redhat-operators-tm6zn" Nov 25 10:12:08 crc kubenswrapper[4769]: I1125 10:12:08.102336 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6dn4f\" (UniqueName: \"kubernetes.io/projected/6d186303-14bd-4046-aefc-0eece7338db6-kube-api-access-6dn4f\") pod \"redhat-operators-tm6zn\" (UID: \"6d186303-14bd-4046-aefc-0eece7338db6\") " pod="openshift-marketplace/redhat-operators-tm6zn" Nov 25 10:12:08 crc kubenswrapper[4769]: I1125 10:12:08.102437 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6d186303-14bd-4046-aefc-0eece7338db6-catalog-content\") pod \"redhat-operators-tm6zn\" (UID: \"6d186303-14bd-4046-aefc-0eece7338db6\") " pod="openshift-marketplace/redhat-operators-tm6zn" Nov 25 10:12:08 crc kubenswrapper[4769]: I1125 10:12:08.102744 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6d186303-14bd-4046-aefc-0eece7338db6-utilities\") pod \"redhat-operators-tm6zn\" (UID: \"6d186303-14bd-4046-aefc-0eece7338db6\") " pod="openshift-marketplace/redhat-operators-tm6zn" Nov 25 10:12:08 crc kubenswrapper[4769]: I1125 10:12:08.103451 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6d186303-14bd-4046-aefc-0eece7338db6-catalog-content\") pod \"redhat-operators-tm6zn\" (UID: \"6d186303-14bd-4046-aefc-0eece7338db6\") " 
pod="openshift-marketplace/redhat-operators-tm6zn" Nov 25 10:12:08 crc kubenswrapper[4769]: I1125 10:12:08.155845 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6dn4f\" (UniqueName: \"kubernetes.io/projected/6d186303-14bd-4046-aefc-0eece7338db6-kube-api-access-6dn4f\") pod \"redhat-operators-tm6zn\" (UID: \"6d186303-14bd-4046-aefc-0eece7338db6\") " pod="openshift-marketplace/redhat-operators-tm6zn" Nov 25 10:12:08 crc kubenswrapper[4769]: I1125 10:12:08.165943 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tm6zn" Nov 25 10:12:08 crc kubenswrapper[4769]: I1125 10:12:08.413841 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-w7l2p"] Nov 25 10:12:08 crc kubenswrapper[4769]: I1125 10:12:08.421347 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-w7l2p" Nov 25 10:12:08 crc kubenswrapper[4769]: I1125 10:12:08.472456 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-w7l2p"] Nov 25 10:12:08 crc kubenswrapper[4769]: I1125 10:12:08.523471 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rkmrg\" (UniqueName: \"kubernetes.io/projected/d0a9ab92-738a-4b49-962a-c1225d2cd761-kube-api-access-rkmrg\") pod \"redhat-marketplace-w7l2p\" (UID: \"d0a9ab92-738a-4b49-962a-c1225d2cd761\") " pod="openshift-marketplace/redhat-marketplace-w7l2p" Nov 25 10:12:08 crc kubenswrapper[4769]: I1125 10:12:08.523679 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d0a9ab92-738a-4b49-962a-c1225d2cd761-catalog-content\") pod \"redhat-marketplace-w7l2p\" (UID: \"d0a9ab92-738a-4b49-962a-c1225d2cd761\") " pod="openshift-marketplace/redhat-marketplace-w7l2p" Nov 25 10:12:08 crc kubenswrapper[4769]: I1125 10:12:08.523922 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d0a9ab92-738a-4b49-962a-c1225d2cd761-utilities\") pod \"redhat-marketplace-w7l2p\" (UID: \"d0a9ab92-738a-4b49-962a-c1225d2cd761\") " pod="openshift-marketplace/redhat-marketplace-w7l2p" Nov 25 10:12:08 crc kubenswrapper[4769]: I1125 10:12:08.625877 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rkmrg\" (UniqueName: \"kubernetes.io/projected/d0a9ab92-738a-4b49-962a-c1225d2cd761-kube-api-access-rkmrg\") pod \"redhat-marketplace-w7l2p\" (UID: \"d0a9ab92-738a-4b49-962a-c1225d2cd761\") " pod="openshift-marketplace/redhat-marketplace-w7l2p" Nov 25 10:12:08 crc kubenswrapper[4769]: I1125 10:12:08.625939 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d0a9ab92-738a-4b49-962a-c1225d2cd761-catalog-content\") pod \"redhat-marketplace-w7l2p\" (UID: \"d0a9ab92-738a-4b49-962a-c1225d2cd761\") " pod="openshift-marketplace/redhat-marketplace-w7l2p" Nov 25 10:12:08 crc kubenswrapper[4769]: I1125 10:12:08.626011 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d0a9ab92-738a-4b49-962a-c1225d2cd761-utilities\") pod \"redhat-marketplace-w7l2p\" (UID: \"d0a9ab92-738a-4b49-962a-c1225d2cd761\") " 
pod="openshift-marketplace/redhat-marketplace-w7l2p" Nov 25 10:12:08 crc kubenswrapper[4769]: I1125 10:12:08.626686 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d0a9ab92-738a-4b49-962a-c1225d2cd761-utilities\") pod \"redhat-marketplace-w7l2p\" (UID: \"d0a9ab92-738a-4b49-962a-c1225d2cd761\") " pod="openshift-marketplace/redhat-marketplace-w7l2p" Nov 25 10:12:08 crc kubenswrapper[4769]: I1125 10:12:08.626700 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d0a9ab92-738a-4b49-962a-c1225d2cd761-catalog-content\") pod \"redhat-marketplace-w7l2p\" (UID: \"d0a9ab92-738a-4b49-962a-c1225d2cd761\") " pod="openshift-marketplace/redhat-marketplace-w7l2p" Nov 25 10:12:08 crc kubenswrapper[4769]: I1125 10:12:08.653399 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rkmrg\" (UniqueName: \"kubernetes.io/projected/d0a9ab92-738a-4b49-962a-c1225d2cd761-kube-api-access-rkmrg\") pod \"redhat-marketplace-w7l2p\" (UID: \"d0a9ab92-738a-4b49-962a-c1225d2cd761\") " pod="openshift-marketplace/redhat-marketplace-w7l2p" Nov 25 10:12:08 crc kubenswrapper[4769]: I1125 10:12:08.779041 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tm6zn"] Nov 25 10:12:08 crc kubenswrapper[4769]: I1125 10:12:08.783927 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-w7l2p" Nov 25 10:12:08 crc kubenswrapper[4769]: I1125 10:12:08.951371 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-ndp8l" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.146839 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wzkzg\" (UniqueName: \"kubernetes.io/projected/080006ea-2bcf-4a74-9daa-6e11cf96a8e2-kube-api-access-wzkzg\") pod \"080006ea-2bcf-4a74-9daa-6e11cf96a8e2\" (UID: \"080006ea-2bcf-4a74-9daa-6e11cf96a8e2\") " Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.147347 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/080006ea-2bcf-4a74-9daa-6e11cf96a8e2-inventory\") pod \"080006ea-2bcf-4a74-9daa-6e11cf96a8e2\" (UID: \"080006ea-2bcf-4a74-9daa-6e11cf96a8e2\") " Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.147507 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/080006ea-2bcf-4a74-9daa-6e11cf96a8e2-ssh-key\") pod \"080006ea-2bcf-4a74-9daa-6e11cf96a8e2\" (UID: \"080006ea-2bcf-4a74-9daa-6e11cf96a8e2\") " Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.159450 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/080006ea-2bcf-4a74-9daa-6e11cf96a8e2-kube-api-access-wzkzg" (OuterVolumeSpecName: "kube-api-access-wzkzg") pod "080006ea-2bcf-4a74-9daa-6e11cf96a8e2" (UID: "080006ea-2bcf-4a74-9daa-6e11cf96a8e2"). InnerVolumeSpecName "kube-api-access-wzkzg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.195259 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/080006ea-2bcf-4a74-9daa-6e11cf96a8e2-inventory" (OuterVolumeSpecName: "inventory") pod "080006ea-2bcf-4a74-9daa-6e11cf96a8e2" (UID: "080006ea-2bcf-4a74-9daa-6e11cf96a8e2"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.198415 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/080006ea-2bcf-4a74-9daa-6e11cf96a8e2-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "080006ea-2bcf-4a74-9daa-6e11cf96a8e2" (UID: "080006ea-2bcf-4a74-9daa-6e11cf96a8e2"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.250705 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wzkzg\" (UniqueName: \"kubernetes.io/projected/080006ea-2bcf-4a74-9daa-6e11cf96a8e2-kube-api-access-wzkzg\") on node \"crc\" DevicePath \"\"" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.250739 4769 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/080006ea-2bcf-4a74-9daa-6e11cf96a8e2-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.250748 4769 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/080006ea-2bcf-4a74-9daa-6e11cf96a8e2-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.350202 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.406212 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-w7l2p"] Nov 25 10:12:09 crc kubenswrapper[4769]: W1125 10:12:09.413942 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd0a9ab92_738a_4b49_962a_c1225d2cd761.slice/crio-e3926a5a12778d973a878007a87c29e14c469d7ebdfa1d842fd9e3d1774603b0 WatchSource:0}: Error finding container e3926a5a12778d973a878007a87c29e14c469d7ebdfa1d842fd9e3d1774603b0: Status 404 returned error can't find the container with id e3926a5a12778d973a878007a87c29e14c469d7ebdfa1d842fd9e3d1774603b0 Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.440162 4769 generic.go:334] "Generic (PLEG): container finished" podID="29183bb3-0d2f-4df7-9729-d2bdfab57b2c" containerID="c98ff24127309bebad1aa4b10d2660d067b86d3217f6a95b3b75bd9fea7beacc" exitCode=0 Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.440258 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"29183bb3-0d2f-4df7-9729-d2bdfab57b2c","Type":"ContainerDied","Data":"c98ff24127309bebad1aa4b10d2660d067b86d3217f6a95b3b75bd9fea7beacc"} Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.440302 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"29183bb3-0d2f-4df7-9729-d2bdfab57b2c","Type":"ContainerDied","Data":"efa0463c3c062b22589c15e2f0fcfcb5f8b4a977bd1b45f6b814ce40a12094cb"} Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.440322 4769 scope.go:117] "RemoveContainer" containerID="5fb4214a2ed4194827bb171987628770bfa552b3f7d1647cc72d207a804189d8" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.448936 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.462511 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-ndp8l" event={"ID":"080006ea-2bcf-4a74-9daa-6e11cf96a8e2","Type":"ContainerDied","Data":"f636e1766ef3d53ad6e32336acd2c075dba76ee8f324e5cb947dafac64f24f8e"} Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.462558 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f636e1766ef3d53ad6e32336acd2c075dba76ee8f324e5cb947dafac64f24f8e" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.462648 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-ndp8l" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.464291 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29183bb3-0d2f-4df7-9729-d2bdfab57b2c-combined-ca-bundle\") pod \"29183bb3-0d2f-4df7-9729-d2bdfab57b2c\" (UID: \"29183bb3-0d2f-4df7-9729-d2bdfab57b2c\") " Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.464391 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/29183bb3-0d2f-4df7-9729-d2bdfab57b2c-internal-tls-certs\") pod \"29183bb3-0d2f-4df7-9729-d2bdfab57b2c\" (UID: \"29183bb3-0d2f-4df7-9729-d2bdfab57b2c\") " Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.464456 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29183bb3-0d2f-4df7-9729-d2bdfab57b2c-config-data\") pod \"29183bb3-0d2f-4df7-9729-d2bdfab57b2c\" (UID: \"29183bb3-0d2f-4df7-9729-d2bdfab57b2c\") " Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.464557 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/29183bb3-0d2f-4df7-9729-d2bdfab57b2c-public-tls-certs\") pod \"29183bb3-0d2f-4df7-9729-d2bdfab57b2c\" (UID: \"29183bb3-0d2f-4df7-9729-d2bdfab57b2c\") " Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.464703 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-24vfz\" (UniqueName: \"kubernetes.io/projected/29183bb3-0d2f-4df7-9729-d2bdfab57b2c-kube-api-access-24vfz\") pod \"29183bb3-0d2f-4df7-9729-d2bdfab57b2c\" (UID: \"29183bb3-0d2f-4df7-9729-d2bdfab57b2c\") " Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.464794 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/29183bb3-0d2f-4df7-9729-d2bdfab57b2c-scripts\") pod \"29183bb3-0d2f-4df7-9729-d2bdfab57b2c\" (UID: \"29183bb3-0d2f-4df7-9729-d2bdfab57b2c\") " Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.468397 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-w7l2p" event={"ID":"d0a9ab92-738a-4b49-962a-c1225d2cd761","Type":"ContainerStarted","Data":"e3926a5a12778d973a878007a87c29e14c469d7ebdfa1d842fd9e3d1774603b0"} Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.477465 4769 generic.go:334] "Generic (PLEG): container finished" podID="6d186303-14bd-4046-aefc-0eece7338db6" containerID="f19c92d4927d92eb8f540f5a6180358d1ee1d828ef3122b5b9866beea1577f49" exitCode=0 Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.477521 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tm6zn" event={"ID":"6d186303-14bd-4046-aefc-0eece7338db6","Type":"ContainerDied","Data":"f19c92d4927d92eb8f540f5a6180358d1ee1d828ef3122b5b9866beea1577f49"} Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.477591 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tm6zn" event={"ID":"6d186303-14bd-4046-aefc-0eece7338db6","Type":"ContainerStarted","Data":"a03951d6bcc893fbde955942c3b054f366249ac918dd9674e353324d531ce517"} Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.548398 4769 operation_generator.go:803] UnmountVolume.TearDown 
succeeded for volume "kubernetes.io/secret/29183bb3-0d2f-4df7-9729-d2bdfab57b2c-scripts" (OuterVolumeSpecName: "scripts") pod "29183bb3-0d2f-4df7-9729-d2bdfab57b2c" (UID: "29183bb3-0d2f-4df7-9729-d2bdfab57b2c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.548450 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29183bb3-0d2f-4df7-9729-d2bdfab57b2c-kube-api-access-24vfz" (OuterVolumeSpecName: "kube-api-access-24vfz") pod "29183bb3-0d2f-4df7-9729-d2bdfab57b2c" (UID: "29183bb3-0d2f-4df7-9729-d2bdfab57b2c"). InnerVolumeSpecName "kube-api-access-24vfz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.574469 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-24vfz\" (UniqueName: \"kubernetes.io/projected/29183bb3-0d2f-4df7-9729-d2bdfab57b2c-kube-api-access-24vfz\") on node \"crc\" DevicePath \"\"" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.574430 4769 scope.go:117] "RemoveContainer" containerID="c98ff24127309bebad1aa4b10d2660d067b86d3217f6a95b3b75bd9fea7beacc" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.574601 4769 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/29183bb3-0d2f-4df7-9729-d2bdfab57b2c-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.576079 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dffv6"] Nov 25 10:12:09 crc kubenswrapper[4769]: E1125 10:12:09.576672 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29183bb3-0d2f-4df7-9729-d2bdfab57b2c" containerName="aodh-api" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.576693 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="29183bb3-0d2f-4df7-9729-d2bdfab57b2c" containerName="aodh-api" Nov 25 10:12:09 crc kubenswrapper[4769]: E1125 10:12:09.576728 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29183bb3-0d2f-4df7-9729-d2bdfab57b2c" containerName="aodh-listener" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.576736 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="29183bb3-0d2f-4df7-9729-d2bdfab57b2c" containerName="aodh-listener" Nov 25 10:12:09 crc kubenswrapper[4769]: E1125 10:12:09.576749 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29183bb3-0d2f-4df7-9729-d2bdfab57b2c" containerName="aodh-notifier" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.576754 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="29183bb3-0d2f-4df7-9729-d2bdfab57b2c" containerName="aodh-notifier" Nov 25 10:12:09 crc kubenswrapper[4769]: E1125 10:12:09.576781 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="080006ea-2bcf-4a74-9daa-6e11cf96a8e2" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.576788 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="080006ea-2bcf-4a74-9daa-6e11cf96a8e2" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 25 10:12:09 crc kubenswrapper[4769]: E1125 10:12:09.576813 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29183bb3-0d2f-4df7-9729-d2bdfab57b2c" containerName="aodh-evaluator" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.576820 4769 
state_mem.go:107] "Deleted CPUSet assignment" podUID="29183bb3-0d2f-4df7-9729-d2bdfab57b2c" containerName="aodh-evaluator" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.577085 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="29183bb3-0d2f-4df7-9729-d2bdfab57b2c" containerName="aodh-api" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.577118 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="29183bb3-0d2f-4df7-9729-d2bdfab57b2c" containerName="aodh-evaluator" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.577135 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="29183bb3-0d2f-4df7-9729-d2bdfab57b2c" containerName="aodh-listener" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.577154 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="29183bb3-0d2f-4df7-9729-d2bdfab57b2c" containerName="aodh-notifier" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.577163 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="080006ea-2bcf-4a74-9daa-6e11cf96a8e2" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.578254 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dffv6" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.580366 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-skqtv" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.582180 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.582384 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.586679 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.588809 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dffv6"] Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.660316 4769 scope.go:117] "RemoveContainer" containerID="959f3ad6d6b0d13571597c4277a2ebb0622005e5176fc5d857747007f1c19506" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.680249 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29183bb3-0d2f-4df7-9729-d2bdfab57b2c-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "29183bb3-0d2f-4df7-9729-d2bdfab57b2c" (UID: "29183bb3-0d2f-4df7-9729-d2bdfab57b2c"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.690403 4769 scope.go:117] "RemoveContainer" containerID="29eae928ea173d70ad89a06b7fb8ad34eca62e515265faff808ffcb27ec1f050" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.727102 4769 scope.go:117] "RemoveContainer" containerID="5fb4214a2ed4194827bb171987628770bfa552b3f7d1647cc72d207a804189d8" Nov 25 10:12:09 crc kubenswrapper[4769]: E1125 10:12:09.727692 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5fb4214a2ed4194827bb171987628770bfa552b3f7d1647cc72d207a804189d8\": container with ID starting with 5fb4214a2ed4194827bb171987628770bfa552b3f7d1647cc72d207a804189d8 not found: ID does not exist" containerID="5fb4214a2ed4194827bb171987628770bfa552b3f7d1647cc72d207a804189d8" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.727727 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5fb4214a2ed4194827bb171987628770bfa552b3f7d1647cc72d207a804189d8"} err="failed to get container status \"5fb4214a2ed4194827bb171987628770bfa552b3f7d1647cc72d207a804189d8\": rpc error: code = NotFound desc = could not find container \"5fb4214a2ed4194827bb171987628770bfa552b3f7d1647cc72d207a804189d8\": container with ID starting with 5fb4214a2ed4194827bb171987628770bfa552b3f7d1647cc72d207a804189d8 not found: ID does not exist" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.727754 4769 scope.go:117] "RemoveContainer" containerID="c98ff24127309bebad1aa4b10d2660d067b86d3217f6a95b3b75bd9fea7beacc" Nov 25 10:12:09 crc kubenswrapper[4769]: E1125 10:12:09.728119 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c98ff24127309bebad1aa4b10d2660d067b86d3217f6a95b3b75bd9fea7beacc\": container with ID starting with c98ff24127309bebad1aa4b10d2660d067b86d3217f6a95b3b75bd9fea7beacc not found: ID does not exist" containerID="c98ff24127309bebad1aa4b10d2660d067b86d3217f6a95b3b75bd9fea7beacc" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.728145 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c98ff24127309bebad1aa4b10d2660d067b86d3217f6a95b3b75bd9fea7beacc"} err="failed to get container status \"c98ff24127309bebad1aa4b10d2660d067b86d3217f6a95b3b75bd9fea7beacc\": rpc error: code = NotFound desc = could not find container \"c98ff24127309bebad1aa4b10d2660d067b86d3217f6a95b3b75bd9fea7beacc\": container with ID starting with c98ff24127309bebad1aa4b10d2660d067b86d3217f6a95b3b75bd9fea7beacc not found: ID does not exist" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.728159 4769 scope.go:117] "RemoveContainer" containerID="959f3ad6d6b0d13571597c4277a2ebb0622005e5176fc5d857747007f1c19506" Nov 25 10:12:09 crc kubenswrapper[4769]: E1125 10:12:09.728561 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"959f3ad6d6b0d13571597c4277a2ebb0622005e5176fc5d857747007f1c19506\": container with ID starting with 959f3ad6d6b0d13571597c4277a2ebb0622005e5176fc5d857747007f1c19506 not found: ID does not exist" containerID="959f3ad6d6b0d13571597c4277a2ebb0622005e5176fc5d857747007f1c19506" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.728586 4769 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"959f3ad6d6b0d13571597c4277a2ebb0622005e5176fc5d857747007f1c19506"} err="failed to get container status \"959f3ad6d6b0d13571597c4277a2ebb0622005e5176fc5d857747007f1c19506\": rpc error: code = NotFound desc = could not find container \"959f3ad6d6b0d13571597c4277a2ebb0622005e5176fc5d857747007f1c19506\": container with ID starting with 959f3ad6d6b0d13571597c4277a2ebb0622005e5176fc5d857747007f1c19506 not found: ID does not exist" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.728600 4769 scope.go:117] "RemoveContainer" containerID="29eae928ea173d70ad89a06b7fb8ad34eca62e515265faff808ffcb27ec1f050" Nov 25 10:12:09 crc kubenswrapper[4769]: E1125 10:12:09.728946 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"29eae928ea173d70ad89a06b7fb8ad34eca62e515265faff808ffcb27ec1f050\": container with ID starting with 29eae928ea173d70ad89a06b7fb8ad34eca62e515265faff808ffcb27ec1f050 not found: ID does not exist" containerID="29eae928ea173d70ad89a06b7fb8ad34eca62e515265faff808ffcb27ec1f050" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.729001 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"29eae928ea173d70ad89a06b7fb8ad34eca62e515265faff808ffcb27ec1f050"} err="failed to get container status \"29eae928ea173d70ad89a06b7fb8ad34eca62e515265faff808ffcb27ec1f050\": rpc error: code = NotFound desc = could not find container \"29eae928ea173d70ad89a06b7fb8ad34eca62e515265faff808ffcb27ec1f050\": container with ID starting with 29eae928ea173d70ad89a06b7fb8ad34eca62e515265faff808ffcb27ec1f050 not found: ID does not exist" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.730065 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29183bb3-0d2f-4df7-9729-d2bdfab57b2c-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "29183bb3-0d2f-4df7-9729-d2bdfab57b2c" (UID: "29183bb3-0d2f-4df7-9729-d2bdfab57b2c"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.763495 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29183bb3-0d2f-4df7-9729-d2bdfab57b2c-config-data" (OuterVolumeSpecName: "config-data") pod "29183bb3-0d2f-4df7-9729-d2bdfab57b2c" (UID: "29183bb3-0d2f-4df7-9729-d2bdfab57b2c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.769036 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29183bb3-0d2f-4df7-9729-d2bdfab57b2c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "29183bb3-0d2f-4df7-9729-d2bdfab57b2c" (UID: "29183bb3-0d2f-4df7-9729-d2bdfab57b2c"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.779853 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s6ftk\" (UniqueName: \"kubernetes.io/projected/77bb348e-56c3-4597-917d-5d918bfad3ca-kube-api-access-s6ftk\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-dffv6\" (UID: \"77bb348e-56c3-4597-917d-5d918bfad3ca\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dffv6" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.780008 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/77bb348e-56c3-4597-917d-5d918bfad3ca-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-dffv6\" (UID: \"77bb348e-56c3-4597-917d-5d918bfad3ca\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dffv6" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.780110 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/77bb348e-56c3-4597-917d-5d918bfad3ca-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-dffv6\" (UID: \"77bb348e-56c3-4597-917d-5d918bfad3ca\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dffv6" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.780344 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77bb348e-56c3-4597-917d-5d918bfad3ca-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-dffv6\" (UID: \"77bb348e-56c3-4597-917d-5d918bfad3ca\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dffv6" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.780515 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29183bb3-0d2f-4df7-9729-d2bdfab57b2c-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.780591 4769 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/29183bb3-0d2f-4df7-9729-d2bdfab57b2c-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.780668 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29183bb3-0d2f-4df7-9729-d2bdfab57b2c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.780729 4769 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/29183bb3-0d2f-4df7-9729-d2bdfab57b2c-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 10:12:09 crc kubenswrapper[4769]: E1125 10:12:09.817459 4769 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod080006ea_2bcf_4a74_9daa_6e11cf96a8e2.slice\": RecentStats: unable to find data in memory cache]" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.882786 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/77bb348e-56c3-4597-917d-5d918bfad3ca-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-dffv6\" (UID: \"77bb348e-56c3-4597-917d-5d918bfad3ca\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dffv6" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.882885 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s6ftk\" (UniqueName: \"kubernetes.io/projected/77bb348e-56c3-4597-917d-5d918bfad3ca-kube-api-access-s6ftk\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-dffv6\" (UID: \"77bb348e-56c3-4597-917d-5d918bfad3ca\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dffv6" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.882934 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/77bb348e-56c3-4597-917d-5d918bfad3ca-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-dffv6\" (UID: \"77bb348e-56c3-4597-917d-5d918bfad3ca\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dffv6" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.882985 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/77bb348e-56c3-4597-917d-5d918bfad3ca-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-dffv6\" (UID: \"77bb348e-56c3-4597-917d-5d918bfad3ca\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dffv6" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.887178 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/77bb348e-56c3-4597-917d-5d918bfad3ca-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-dffv6\" (UID: \"77bb348e-56c3-4597-917d-5d918bfad3ca\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dffv6" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.888516 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/77bb348e-56c3-4597-917d-5d918bfad3ca-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-dffv6\" (UID: \"77bb348e-56c3-4597-917d-5d918bfad3ca\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dffv6" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.892229 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77bb348e-56c3-4597-917d-5d918bfad3ca-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-dffv6\" (UID: \"77bb348e-56c3-4597-917d-5d918bfad3ca\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dffv6" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.910052 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s6ftk\" (UniqueName: \"kubernetes.io/projected/77bb348e-56c3-4597-917d-5d918bfad3ca-kube-api-access-s6ftk\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-dffv6\" (UID: \"77bb348e-56c3-4597-917d-5d918bfad3ca\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dffv6" Nov 25 10:12:09 crc kubenswrapper[4769]: I1125 10:12:09.938926 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dffv6" Nov 25 10:12:10 crc kubenswrapper[4769]: I1125 10:12:10.154064 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Nov 25 10:12:10 crc kubenswrapper[4769]: I1125 10:12:10.185058 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-0"] Nov 25 10:12:10 crc kubenswrapper[4769]: I1125 10:12:10.225744 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Nov 25 10:12:10 crc kubenswrapper[4769]: I1125 10:12:10.246293 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Nov 25 10:12:10 crc kubenswrapper[4769]: I1125 10:12:10.250766 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-5nnz9" Nov 25 10:12:10 crc kubenswrapper[4769]: I1125 10:12:10.250810 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-internal-svc" Nov 25 10:12:10 crc kubenswrapper[4769]: I1125 10:12:10.251041 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Nov 25 10:12:10 crc kubenswrapper[4769]: I1125 10:12:10.258847 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Nov 25 10:12:10 crc kubenswrapper[4769]: I1125 10:12:10.259236 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-public-svc" Nov 25 10:12:10 crc kubenswrapper[4769]: I1125 10:12:10.288650 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="29183bb3-0d2f-4df7-9729-d2bdfab57b2c" path="/var/lib/kubelet/pods/29183bb3-0d2f-4df7-9729-d2bdfab57b2c/volumes" Nov 25 10:12:10 crc kubenswrapper[4769]: I1125 10:12:10.289951 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Nov 25 10:12:10 crc kubenswrapper[4769]: I1125 10:12:10.449841 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/19ab0e31-61ae-4868-8a6a-77354302637c-internal-tls-certs\") pod \"aodh-0\" (UID: \"19ab0e31-61ae-4868-8a6a-77354302637c\") " pod="openstack/aodh-0" Nov 25 10:12:10 crc kubenswrapper[4769]: I1125 10:12:10.449942 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/19ab0e31-61ae-4868-8a6a-77354302637c-scripts\") pod \"aodh-0\" (UID: \"19ab0e31-61ae-4868-8a6a-77354302637c\") " pod="openstack/aodh-0" Nov 25 10:12:10 crc kubenswrapper[4769]: I1125 10:12:10.450098 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/19ab0e31-61ae-4868-8a6a-77354302637c-public-tls-certs\") pod \"aodh-0\" (UID: \"19ab0e31-61ae-4868-8a6a-77354302637c\") " pod="openstack/aodh-0" Nov 25 10:12:10 crc kubenswrapper[4769]: I1125 10:12:10.450120 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19ab0e31-61ae-4868-8a6a-77354302637c-config-data\") pod \"aodh-0\" (UID: \"19ab0e31-61ae-4868-8a6a-77354302637c\") " pod="openstack/aodh-0" Nov 25 10:12:10 crc kubenswrapper[4769]: I1125 10:12:10.450149 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/19ab0e31-61ae-4868-8a6a-77354302637c-combined-ca-bundle\") pod \"aodh-0\" (UID: \"19ab0e31-61ae-4868-8a6a-77354302637c\") " pod="openstack/aodh-0" Nov 25 10:12:10 crc kubenswrapper[4769]: I1125 10:12:10.450459 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hp5x7\" (UniqueName: \"kubernetes.io/projected/19ab0e31-61ae-4868-8a6a-77354302637c-kube-api-access-hp5x7\") pod \"aodh-0\" (UID: \"19ab0e31-61ae-4868-8a6a-77354302637c\") " pod="openstack/aodh-0" Nov 25 10:12:10 crc kubenswrapper[4769]: I1125 10:12:10.530330 4769 generic.go:334] "Generic (PLEG): container finished" podID="d0a9ab92-738a-4b49-962a-c1225d2cd761" containerID="3709805ab394d14be9aa24342ba3c88f7577fc7e74df8a61146c14c8ce94c6a6" exitCode=0 Nov 25 10:12:10 crc kubenswrapper[4769]: I1125 10:12:10.530374 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-w7l2p" event={"ID":"d0a9ab92-738a-4b49-962a-c1225d2cd761","Type":"ContainerDied","Data":"3709805ab394d14be9aa24342ba3c88f7577fc7e74df8a61146c14c8ce94c6a6"} Nov 25 10:12:10 crc kubenswrapper[4769]: I1125 10:12:10.552595 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/19ab0e31-61ae-4868-8a6a-77354302637c-internal-tls-certs\") pod \"aodh-0\" (UID: \"19ab0e31-61ae-4868-8a6a-77354302637c\") " pod="openstack/aodh-0" Nov 25 10:12:10 crc kubenswrapper[4769]: I1125 10:12:10.552799 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/19ab0e31-61ae-4868-8a6a-77354302637c-scripts\") pod \"aodh-0\" (UID: \"19ab0e31-61ae-4868-8a6a-77354302637c\") " pod="openstack/aodh-0" Nov 25 10:12:10 crc kubenswrapper[4769]: I1125 10:12:10.552951 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/19ab0e31-61ae-4868-8a6a-77354302637c-public-tls-certs\") pod \"aodh-0\" (UID: \"19ab0e31-61ae-4868-8a6a-77354302637c\") " pod="openstack/aodh-0" Nov 25 10:12:10 crc kubenswrapper[4769]: I1125 10:12:10.553218 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19ab0e31-61ae-4868-8a6a-77354302637c-config-data\") pod \"aodh-0\" (UID: \"19ab0e31-61ae-4868-8a6a-77354302637c\") " pod="openstack/aodh-0" Nov 25 10:12:10 crc kubenswrapper[4769]: I1125 10:12:10.553302 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19ab0e31-61ae-4868-8a6a-77354302637c-combined-ca-bundle\") pod \"aodh-0\" (UID: \"19ab0e31-61ae-4868-8a6a-77354302637c\") " pod="openstack/aodh-0" Nov 25 10:12:10 crc kubenswrapper[4769]: I1125 10:12:10.553382 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hp5x7\" (UniqueName: \"kubernetes.io/projected/19ab0e31-61ae-4868-8a6a-77354302637c-kube-api-access-hp5x7\") pod \"aodh-0\" (UID: \"19ab0e31-61ae-4868-8a6a-77354302637c\") " pod="openstack/aodh-0" Nov 25 10:12:10 crc kubenswrapper[4769]: I1125 10:12:10.561925 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/19ab0e31-61ae-4868-8a6a-77354302637c-internal-tls-certs\") pod \"aodh-0\" (UID: \"19ab0e31-61ae-4868-8a6a-77354302637c\") " pod="openstack/aodh-0" Nov 25 10:12:10 crc 
kubenswrapper[4769]: I1125 10:12:10.562432 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/19ab0e31-61ae-4868-8a6a-77354302637c-scripts\") pod \"aodh-0\" (UID: \"19ab0e31-61ae-4868-8a6a-77354302637c\") " pod="openstack/aodh-0" Nov 25 10:12:10 crc kubenswrapper[4769]: I1125 10:12:10.564956 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/19ab0e31-61ae-4868-8a6a-77354302637c-config-data\") pod \"aodh-0\" (UID: \"19ab0e31-61ae-4868-8a6a-77354302637c\") " pod="openstack/aodh-0" Nov 25 10:12:10 crc kubenswrapper[4769]: I1125 10:12:10.566328 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/19ab0e31-61ae-4868-8a6a-77354302637c-public-tls-certs\") pod \"aodh-0\" (UID: \"19ab0e31-61ae-4868-8a6a-77354302637c\") " pod="openstack/aodh-0" Nov 25 10:12:10 crc kubenswrapper[4769]: I1125 10:12:10.567641 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19ab0e31-61ae-4868-8a6a-77354302637c-combined-ca-bundle\") pod \"aodh-0\" (UID: \"19ab0e31-61ae-4868-8a6a-77354302637c\") " pod="openstack/aodh-0" Nov 25 10:12:10 crc kubenswrapper[4769]: I1125 10:12:10.576569 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hp5x7\" (UniqueName: \"kubernetes.io/projected/19ab0e31-61ae-4868-8a6a-77354302637c-kube-api-access-hp5x7\") pod \"aodh-0\" (UID: \"19ab0e31-61ae-4868-8a6a-77354302637c\") " pod="openstack/aodh-0" Nov 25 10:12:10 crc kubenswrapper[4769]: I1125 10:12:10.581019 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Nov 25 10:12:10 crc kubenswrapper[4769]: W1125 10:12:10.639416 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod77bb348e_56c3_4597_917d_5d918bfad3ca.slice/crio-cf6ad2b0956b399edf2aa176541819f8298c20cd5c6101a41a3959c917385558 WatchSource:0}: Error finding container cf6ad2b0956b399edf2aa176541819f8298c20cd5c6101a41a3959c917385558: Status 404 returned error can't find the container with id cf6ad2b0956b399edf2aa176541819f8298c20cd5c6101a41a3959c917385558 Nov 25 10:12:10 crc kubenswrapper[4769]: I1125 10:12:10.650905 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dffv6"] Nov 25 10:12:11 crc kubenswrapper[4769]: I1125 10:12:11.125895 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Nov 25 10:12:11 crc kubenswrapper[4769]: W1125 10:12:11.131380 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod19ab0e31_61ae_4868_8a6a_77354302637c.slice/crio-be80442cafa942e89c632c4e405fd60c01ef58062327c65fad4958f7628af8f2 WatchSource:0}: Error finding container be80442cafa942e89c632c4e405fd60c01ef58062327c65fad4958f7628af8f2: Status 404 returned error can't find the container with id be80442cafa942e89c632c4e405fd60c01ef58062327c65fad4958f7628af8f2 Nov 25 10:12:11 crc kubenswrapper[4769]: I1125 10:12:11.561356 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tm6zn" event={"ID":"6d186303-14bd-4046-aefc-0eece7338db6","Type":"ContainerStarted","Data":"3d4221303053e3db612c6348e23fe6f853b5d20b116a52f820cfa10b5d44d372"} 
Nov 25 10:12:11 crc kubenswrapper[4769]: I1125 10:12:11.565330 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dffv6" event={"ID":"77bb348e-56c3-4597-917d-5d918bfad3ca","Type":"ContainerStarted","Data":"6e9ad158876d4e2310c510099565c3b8cb1b245f2fe131b5e336ba49467414e8"} Nov 25 10:12:11 crc kubenswrapper[4769]: I1125 10:12:11.565396 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dffv6" event={"ID":"77bb348e-56c3-4597-917d-5d918bfad3ca","Type":"ContainerStarted","Data":"cf6ad2b0956b399edf2aa176541819f8298c20cd5c6101a41a3959c917385558"} Nov 25 10:12:11 crc kubenswrapper[4769]: I1125 10:12:11.574164 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"19ab0e31-61ae-4868-8a6a-77354302637c","Type":"ContainerStarted","Data":"be80442cafa942e89c632c4e405fd60c01ef58062327c65fad4958f7628af8f2"} Nov 25 10:12:11 crc kubenswrapper[4769]: I1125 10:12:11.621480 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dffv6" podStartSLOduration=2.117256559 podStartE2EDuration="2.621456099s" podCreationTimestamp="2025-11-25 10:12:09 +0000 UTC" firstStartedPulling="2025-11-25 10:12:10.643339827 +0000 UTC m=+1679.228312140" lastFinishedPulling="2025-11-25 10:12:11.147539367 +0000 UTC m=+1679.732511680" observedRunningTime="2025-11-25 10:12:11.615190248 +0000 UTC m=+1680.200162561" watchObservedRunningTime="2025-11-25 10:12:11.621456099 +0000 UTC m=+1680.206428412" Nov 25 10:12:12 crc kubenswrapper[4769]: I1125 10:12:12.595080 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"19ab0e31-61ae-4868-8a6a-77354302637c","Type":"ContainerStarted","Data":"85b229f6868df2b32d8b88f1c5b5fb8f8603d5e25e5b3373e1d63af8e2836b69"} Nov 25 10:12:12 crc kubenswrapper[4769]: I1125 10:12:12.602009 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-w7l2p" event={"ID":"d0a9ab92-738a-4b49-962a-c1225d2cd761","Type":"ContainerStarted","Data":"a29964752226848eadd36e16309d40d839e94cb2373f2fd8db91d9d9bb486ff1"} Nov 25 10:12:14 crc kubenswrapper[4769]: I1125 10:12:14.634645 4769 generic.go:334] "Generic (PLEG): container finished" podID="d0a9ab92-738a-4b49-962a-c1225d2cd761" containerID="a29964752226848eadd36e16309d40d839e94cb2373f2fd8db91d9d9bb486ff1" exitCode=0 Nov 25 10:12:14 crc kubenswrapper[4769]: I1125 10:12:14.634741 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-w7l2p" event={"ID":"d0a9ab92-738a-4b49-962a-c1225d2cd761","Type":"ContainerDied","Data":"a29964752226848eadd36e16309d40d839e94cb2373f2fd8db91d9d9bb486ff1"} Nov 25 10:12:15 crc kubenswrapper[4769]: I1125 10:12:15.656847 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"19ab0e31-61ae-4868-8a6a-77354302637c","Type":"ContainerStarted","Data":"c106c605f54dccb28db8811355ae1961b51c9f377fd056bf261981b398d0b3ab"} Nov 25 10:12:16 crc kubenswrapper[4769]: I1125 10:12:16.670068 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-w7l2p" event={"ID":"d0a9ab92-738a-4b49-962a-c1225d2cd761","Type":"ContainerStarted","Data":"3a8820d6ee0d3332f2ca354707d2a07a09047c4c6ced4ed1190f1f9862fae220"} Nov 25 10:12:16 crc kubenswrapper[4769]: I1125 10:12:16.697404 4769 pod_startup_latency_tracker.go:104] "Observed pod startup 
duration" pod="openshift-marketplace/redhat-marketplace-w7l2p" podStartSLOduration=3.668475834 podStartE2EDuration="8.697387569s" podCreationTimestamp="2025-11-25 10:12:08 +0000 UTC" firstStartedPulling="2025-11-25 10:12:10.536094657 +0000 UTC m=+1679.121066970" lastFinishedPulling="2025-11-25 10:12:15.565006382 +0000 UTC m=+1684.149978705" observedRunningTime="2025-11-25 10:12:16.691618221 +0000 UTC m=+1685.276590524" watchObservedRunningTime="2025-11-25 10:12:16.697387569 +0000 UTC m=+1685.282359882" Nov 25 10:12:17 crc kubenswrapper[4769]: I1125 10:12:17.237710 4769 scope.go:117] "RemoveContainer" containerID="19d7e4719048ead4c6811e0bf09eaeb04f5d9c0b0e2c91bc113c457c1276b7a7" Nov 25 10:12:17 crc kubenswrapper[4769]: E1125 10:12:17.238102 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:12:17 crc kubenswrapper[4769]: I1125 10:12:17.697086 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"19ab0e31-61ae-4868-8a6a-77354302637c","Type":"ContainerStarted","Data":"cf0ff02d29932a9d31177d29e5249a91f3448a7a4d2b91caf6322b7873b3e2c2"} Nov 25 10:12:17 crc kubenswrapper[4769]: I1125 10:12:17.701811 4769 generic.go:334] "Generic (PLEG): container finished" podID="6d186303-14bd-4046-aefc-0eece7338db6" containerID="3d4221303053e3db612c6348e23fe6f853b5d20b116a52f820cfa10b5d44d372" exitCode=0 Nov 25 10:12:17 crc kubenswrapper[4769]: I1125 10:12:17.701860 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tm6zn" event={"ID":"6d186303-14bd-4046-aefc-0eece7338db6","Type":"ContainerDied","Data":"3d4221303053e3db612c6348e23fe6f853b5d20b116a52f820cfa10b5d44d372"} Nov 25 10:12:18 crc kubenswrapper[4769]: I1125 10:12:18.784553 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-w7l2p" Nov 25 10:12:18 crc kubenswrapper[4769]: I1125 10:12:18.784928 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-w7l2p" Nov 25 10:12:19 crc kubenswrapper[4769]: I1125 10:12:19.726193 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"19ab0e31-61ae-4868-8a6a-77354302637c","Type":"ContainerStarted","Data":"7cd830ce30f757d7a4f42326da2001fe7698ba64fbe021a353c356d556966205"} Nov 25 10:12:19 crc kubenswrapper[4769]: I1125 10:12:19.730595 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tm6zn" event={"ID":"6d186303-14bd-4046-aefc-0eece7338db6","Type":"ContainerStarted","Data":"ac19e02f5f9d5a5c633217b874840efd58101237929a9fdadd02bdecb83aa86c"} Nov 25 10:12:19 crc kubenswrapper[4769]: I1125 10:12:19.757970 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=2.494151153 podStartE2EDuration="9.757940464s" podCreationTimestamp="2025-11-25 10:12:10 +0000 UTC" firstStartedPulling="2025-11-25 10:12:11.146214193 +0000 UTC m=+1679.731186506" lastFinishedPulling="2025-11-25 10:12:18.410003494 +0000 UTC m=+1686.994975817" observedRunningTime="2025-11-25 10:12:19.74848368 +0000 UTC 
m=+1688.333455993" watchObservedRunningTime="2025-11-25 10:12:19.757940464 +0000 UTC m=+1688.342912777" Nov 25 10:12:19 crc kubenswrapper[4769]: I1125 10:12:19.777959 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-tm6zn" podStartSLOduration=3.852643954 podStartE2EDuration="12.777933032s" podCreationTimestamp="2025-11-25 10:12:07 +0000 UTC" firstStartedPulling="2025-11-25 10:12:09.486085763 +0000 UTC m=+1678.071058076" lastFinishedPulling="2025-11-25 10:12:18.411374831 +0000 UTC m=+1686.996347154" observedRunningTime="2025-11-25 10:12:19.772758673 +0000 UTC m=+1688.357730996" watchObservedRunningTime="2025-11-25 10:12:19.777933032 +0000 UTC m=+1688.362905365" Nov 25 10:12:19 crc kubenswrapper[4769]: I1125 10:12:19.853579 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-w7l2p" podUID="d0a9ab92-738a-4b49-962a-c1225d2cd761" containerName="registry-server" probeResult="failure" output=< Nov 25 10:12:19 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 10:12:19 crc kubenswrapper[4769]: > Nov 25 10:12:28 crc kubenswrapper[4769]: I1125 10:12:28.166182 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-tm6zn" Nov 25 10:12:28 crc kubenswrapper[4769]: I1125 10:12:28.167136 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-tm6zn" Nov 25 10:12:28 crc kubenswrapper[4769]: I1125 10:12:28.864047 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-w7l2p" Nov 25 10:12:28 crc kubenswrapper[4769]: I1125 10:12:28.922740 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-w7l2p" Nov 25 10:12:29 crc kubenswrapper[4769]: I1125 10:12:29.126936 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-w7l2p"] Nov 25 10:12:29 crc kubenswrapper[4769]: I1125 10:12:29.238304 4769 scope.go:117] "RemoveContainer" containerID="19d7e4719048ead4c6811e0bf09eaeb04f5d9c0b0e2c91bc113c457c1276b7a7" Nov 25 10:12:29 crc kubenswrapper[4769]: E1125 10:12:29.238909 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:12:29 crc kubenswrapper[4769]: I1125 10:12:29.273520 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tm6zn" podUID="6d186303-14bd-4046-aefc-0eece7338db6" containerName="registry-server" probeResult="failure" output=< Nov 25 10:12:29 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 10:12:29 crc kubenswrapper[4769]: > Nov 25 10:12:30 crc kubenswrapper[4769]: I1125 10:12:30.926390 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-w7l2p" podUID="d0a9ab92-738a-4b49-962a-c1225d2cd761" containerName="registry-server" containerID="cri-o://3a8820d6ee0d3332f2ca354707d2a07a09047c4c6ced4ed1190f1f9862fae220" gracePeriod=2 Nov 25 10:12:31 
Nov 25 10:12:31 crc kubenswrapper[4769]: I1125 10:12:31.554493 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-w7l2p"
Nov 25 10:12:31 crc kubenswrapper[4769]: I1125 10:12:31.713151 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d0a9ab92-738a-4b49-962a-c1225d2cd761-catalog-content\") pod \"d0a9ab92-738a-4b49-962a-c1225d2cd761\" (UID: \"d0a9ab92-738a-4b49-962a-c1225d2cd761\") "
Nov 25 10:12:31 crc kubenswrapper[4769]: I1125 10:12:31.713285 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rkmrg\" (UniqueName: \"kubernetes.io/projected/d0a9ab92-738a-4b49-962a-c1225d2cd761-kube-api-access-rkmrg\") pod \"d0a9ab92-738a-4b49-962a-c1225d2cd761\" (UID: \"d0a9ab92-738a-4b49-962a-c1225d2cd761\") "
Nov 25 10:12:31 crc kubenswrapper[4769]: I1125 10:12:31.713674 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d0a9ab92-738a-4b49-962a-c1225d2cd761-utilities\") pod \"d0a9ab92-738a-4b49-962a-c1225d2cd761\" (UID: \"d0a9ab92-738a-4b49-962a-c1225d2cd761\") "
Nov 25 10:12:31 crc kubenswrapper[4769]: I1125 10:12:31.714190 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d0a9ab92-738a-4b49-962a-c1225d2cd761-utilities" (OuterVolumeSpecName: "utilities") pod "d0a9ab92-738a-4b49-962a-c1225d2cd761" (UID: "d0a9ab92-738a-4b49-962a-c1225d2cd761"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 10:12:31 crc kubenswrapper[4769]: I1125 10:12:31.722267 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d0a9ab92-738a-4b49-962a-c1225d2cd761-kube-api-access-rkmrg" (OuterVolumeSpecName: "kube-api-access-rkmrg") pod "d0a9ab92-738a-4b49-962a-c1225d2cd761" (UID: "d0a9ab92-738a-4b49-962a-c1225d2cd761"). InnerVolumeSpecName "kube-api-access-rkmrg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:12:31 crc kubenswrapper[4769]: I1125 10:12:31.735942 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d0a9ab92-738a-4b49-962a-c1225d2cd761-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d0a9ab92-738a-4b49-962a-c1225d2cd761" (UID: "d0a9ab92-738a-4b49-962a-c1225d2cd761"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 10:12:31 crc kubenswrapper[4769]: I1125 10:12:31.817455 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d0a9ab92-738a-4b49-962a-c1225d2cd761-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 10:12:31 crc kubenswrapper[4769]: I1125 10:12:31.817512 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d0a9ab92-738a-4b49-962a-c1225d2cd761-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 10:12:31 crc kubenswrapper[4769]: I1125 10:12:31.817539 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rkmrg\" (UniqueName: \"kubernetes.io/projected/d0a9ab92-738a-4b49-962a-c1225d2cd761-kube-api-access-rkmrg\") on node \"crc\" DevicePath \"\""
Nov 25 10:12:31 crc kubenswrapper[4769]: I1125 10:12:31.942680 4769 generic.go:334] "Generic (PLEG): container finished" podID="d0a9ab92-738a-4b49-962a-c1225d2cd761" containerID="3a8820d6ee0d3332f2ca354707d2a07a09047c4c6ced4ed1190f1f9862fae220" exitCode=0
Nov 25 10:12:31 crc kubenswrapper[4769]: I1125 10:12:31.942747 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-w7l2p" event={"ID":"d0a9ab92-738a-4b49-962a-c1225d2cd761","Type":"ContainerDied","Data":"3a8820d6ee0d3332f2ca354707d2a07a09047c4c6ced4ed1190f1f9862fae220"}
Nov 25 10:12:31 crc kubenswrapper[4769]: I1125 10:12:31.942728 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-w7l2p"
Nov 25 10:12:31 crc kubenswrapper[4769]: I1125 10:12:31.942792 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-w7l2p" event={"ID":"d0a9ab92-738a-4b49-962a-c1225d2cd761","Type":"ContainerDied","Data":"e3926a5a12778d973a878007a87c29e14c469d7ebdfa1d842fd9e3d1774603b0"}
Nov 25 10:12:31 crc kubenswrapper[4769]: I1125 10:12:31.942822 4769 scope.go:117] "RemoveContainer" containerID="3a8820d6ee0d3332f2ca354707d2a07a09047c4c6ced4ed1190f1f9862fae220"
Nov 25 10:12:31 crc kubenswrapper[4769]: I1125 10:12:31.984703 4769 scope.go:117] "RemoveContainer" containerID="a29964752226848eadd36e16309d40d839e94cb2373f2fd8db91d9d9bb486ff1"
Nov 25 10:12:31 crc kubenswrapper[4769]: I1125 10:12:31.997477 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-w7l2p"]
Nov 25 10:12:32 crc kubenswrapper[4769]: I1125 10:12:32.004921 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-w7l2p"]
Nov 25 10:12:32 crc kubenswrapper[4769]: I1125 10:12:32.015566 4769 scope.go:117] "RemoveContainer" containerID="3709805ab394d14be9aa24342ba3c88f7577fc7e74df8a61146c14c8ce94c6a6"
Nov 25 10:12:32 crc kubenswrapper[4769]: I1125 10:12:32.095895 4769 scope.go:117] "RemoveContainer" containerID="3a8820d6ee0d3332f2ca354707d2a07a09047c4c6ced4ed1190f1f9862fae220"
Nov 25 10:12:32 crc kubenswrapper[4769]: E1125 10:12:32.096410 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3a8820d6ee0d3332f2ca354707d2a07a09047c4c6ced4ed1190f1f9862fae220\": container with ID starting with 3a8820d6ee0d3332f2ca354707d2a07a09047c4c6ced4ed1190f1f9862fae220 not found: ID does not exist" containerID="3a8820d6ee0d3332f2ca354707d2a07a09047c4c6ced4ed1190f1f9862fae220"
Nov 25 10:12:32 crc kubenswrapper[4769]: I1125 10:12:32.096466 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3a8820d6ee0d3332f2ca354707d2a07a09047c4c6ced4ed1190f1f9862fae220"} err="failed to get container status \"3a8820d6ee0d3332f2ca354707d2a07a09047c4c6ced4ed1190f1f9862fae220\": rpc error: code = NotFound desc = could not find container \"3a8820d6ee0d3332f2ca354707d2a07a09047c4c6ced4ed1190f1f9862fae220\": container with ID starting with 3a8820d6ee0d3332f2ca354707d2a07a09047c4c6ced4ed1190f1f9862fae220 not found: ID does not exist"
Nov 25 10:12:32 crc kubenswrapper[4769]: I1125 10:12:32.096506 4769 scope.go:117] "RemoveContainer" containerID="a29964752226848eadd36e16309d40d839e94cb2373f2fd8db91d9d9bb486ff1"
Nov 25 10:12:32 crc kubenswrapper[4769]: E1125 10:12:32.097221 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a29964752226848eadd36e16309d40d839e94cb2373f2fd8db91d9d9bb486ff1\": container with ID starting with a29964752226848eadd36e16309d40d839e94cb2373f2fd8db91d9d9bb486ff1 not found: ID does not exist" containerID="a29964752226848eadd36e16309d40d839e94cb2373f2fd8db91d9d9bb486ff1"
Nov 25 10:12:32 crc kubenswrapper[4769]: I1125 10:12:32.097369 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a29964752226848eadd36e16309d40d839e94cb2373f2fd8db91d9d9bb486ff1"} err="failed to get container status \"a29964752226848eadd36e16309d40d839e94cb2373f2fd8db91d9d9bb486ff1\": rpc error: code = NotFound desc = could not find container \"a29964752226848eadd36e16309d40d839e94cb2373f2fd8db91d9d9bb486ff1\": container with ID starting with a29964752226848eadd36e16309d40d839e94cb2373f2fd8db91d9d9bb486ff1 not found: ID does not exist"
Nov 25 10:12:32 crc kubenswrapper[4769]: I1125 10:12:32.097476 4769 scope.go:117] "RemoveContainer" containerID="3709805ab394d14be9aa24342ba3c88f7577fc7e74df8a61146c14c8ce94c6a6"
Nov 25 10:12:32 crc kubenswrapper[4769]: E1125 10:12:32.097925 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3709805ab394d14be9aa24342ba3c88f7577fc7e74df8a61146c14c8ce94c6a6\": container with ID starting with 3709805ab394d14be9aa24342ba3c88f7577fc7e74df8a61146c14c8ce94c6a6 not found: ID does not exist" containerID="3709805ab394d14be9aa24342ba3c88f7577fc7e74df8a61146c14c8ce94c6a6"
Nov 25 10:12:32 crc kubenswrapper[4769]: I1125 10:12:32.097989 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3709805ab394d14be9aa24342ba3c88f7577fc7e74df8a61146c14c8ce94c6a6"} err="failed to get container status \"3709805ab394d14be9aa24342ba3c88f7577fc7e74df8a61146c14c8ce94c6a6\": rpc error: code = NotFound desc = could not find container \"3709805ab394d14be9aa24342ba3c88f7577fc7e74df8a61146c14c8ce94c6a6\": container with ID starting with 3709805ab394d14be9aa24342ba3c88f7577fc7e74df8a61146c14c8ce94c6a6 not found: ID does not exist"
Nov 25 10:12:32 crc kubenswrapper[4769]: I1125 10:12:32.254828 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d0a9ab92-738a-4b49-962a-c1225d2cd761" path="/var/lib/kubelet/pods/d0a9ab92-738a-4b49-962a-c1225d2cd761/volumes"
Nov 25 10:12:37 crc kubenswrapper[4769]: I1125 10:12:37.907373 4769 scope.go:117] "RemoveContainer" containerID="17ea494fb30ab2fb842a1d0094d13445137fc4c5cf53906984aa735164ec3a12"
Nov 25 10:12:37 crc kubenswrapper[4769]: I1125 10:12:37.965151 4769 scope.go:117] "RemoveContainer" containerID="e0ebfa1cf62473ea4d5841cc8213e9832599f8c25f90fcd72a7c7ecd99c84878"
Nov 25 10:12:38 crc kubenswrapper[4769]: I1125 10:12:38.020142 4769 scope.go:117] "RemoveContainer" containerID="d91a490871ece7da51e72a6b88e953bd60b6b6e869189972281a12d961f44b73"
Nov 25 10:12:38 crc kubenswrapper[4769]: I1125 10:12:38.075177 4769 scope.go:117] "RemoveContainer" containerID="c382688aa5e0a4dfc850c1e90963dd8063c3ffd50a52c7e99ad9a7bdc9081fc8"
Nov 25 10:12:38 crc kubenswrapper[4769]: I1125 10:12:38.119206 4769 scope.go:117] "RemoveContainer" containerID="65f779b115981bb11398df135b35a40ffc144a6aa60ffe09482b5df5ada622eb"
Nov 25 10:12:39 crc kubenswrapper[4769]: I1125 10:12:39.291926 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tm6zn" podUID="6d186303-14bd-4046-aefc-0eece7338db6" containerName="registry-server" probeResult="failure" output=<
Nov 25 10:12:39 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 10:12:39 crc kubenswrapper[4769]: >
Nov 25 10:12:42 crc kubenswrapper[4769]: I1125 10:12:42.246351 4769 scope.go:117] "RemoveContainer" containerID="19d7e4719048ead4c6811e0bf09eaeb04f5d9c0b0e2c91bc113c457c1276b7a7"
Nov 25 10:12:42 crc kubenswrapper[4769]: E1125 10:12:42.246903 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256"
Nov 25 10:12:49 crc kubenswrapper[4769]: I1125 10:12:49.263614 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tm6zn" podUID="6d186303-14bd-4046-aefc-0eece7338db6" containerName="registry-server" probeResult="failure" output=<
Nov 25 10:12:49 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 10:12:49 crc kubenswrapper[4769]: >
Nov 25 10:12:54 crc kubenswrapper[4769]: I1125 10:12:54.238491 4769 scope.go:117] "RemoveContainer" containerID="19d7e4719048ead4c6811e0bf09eaeb04f5d9c0b0e2c91bc113c457c1276b7a7"
Nov 25 10:12:54 crc kubenswrapper[4769]: E1125 10:12:54.239984 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256"
Nov 25 10:12:58 crc kubenswrapper[4769]: I1125 10:12:58.286651 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-tm6zn"
Nov 25 10:12:58 crc kubenswrapper[4769]: I1125 10:12:58.372436 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-tm6zn"
Nov 25 10:12:58 crc kubenswrapper[4769]: I1125 10:12:58.541100 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tm6zn"]
Nov 25 10:12:59 crc kubenswrapper[4769]: I1125 10:12:59.428761 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-tm6zn" podUID="6d186303-14bd-4046-aefc-0eece7338db6" containerName="registry-server" containerID="cri-o://ac19e02f5f9d5a5c633217b874840efd58101237929a9fdadd02bdecb83aa86c" gracePeriod=2
Nov 25 10:12:59 crc kubenswrapper[4769]: I1125 10:12:59.982295 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tm6zn"
Nov 25 10:13:00 crc kubenswrapper[4769]: I1125 10:13:00.051066 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6dn4f\" (UniqueName: \"kubernetes.io/projected/6d186303-14bd-4046-aefc-0eece7338db6-kube-api-access-6dn4f\") pod \"6d186303-14bd-4046-aefc-0eece7338db6\" (UID: \"6d186303-14bd-4046-aefc-0eece7338db6\") "
Nov 25 10:13:00 crc kubenswrapper[4769]: I1125 10:13:00.051186 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6d186303-14bd-4046-aefc-0eece7338db6-utilities\") pod \"6d186303-14bd-4046-aefc-0eece7338db6\" (UID: \"6d186303-14bd-4046-aefc-0eece7338db6\") "
Nov 25 10:13:00 crc kubenswrapper[4769]: I1125 10:13:00.051343 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6d186303-14bd-4046-aefc-0eece7338db6-catalog-content\") pod \"6d186303-14bd-4046-aefc-0eece7338db6\" (UID: \"6d186303-14bd-4046-aefc-0eece7338db6\") "
Nov 25 10:13:00 crc kubenswrapper[4769]: I1125 10:13:00.052503 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6d186303-14bd-4046-aefc-0eece7338db6-utilities" (OuterVolumeSpecName: "utilities") pod "6d186303-14bd-4046-aefc-0eece7338db6" (UID: "6d186303-14bd-4046-aefc-0eece7338db6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 10:13:00 crc kubenswrapper[4769]: I1125 10:13:00.057900 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d186303-14bd-4046-aefc-0eece7338db6-kube-api-access-6dn4f" (OuterVolumeSpecName: "kube-api-access-6dn4f") pod "6d186303-14bd-4046-aefc-0eece7338db6" (UID: "6d186303-14bd-4046-aefc-0eece7338db6"). InnerVolumeSpecName "kube-api-access-6dn4f". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:13:00 crc kubenswrapper[4769]: I1125 10:13:00.147700 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6d186303-14bd-4046-aefc-0eece7338db6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6d186303-14bd-4046-aefc-0eece7338db6" (UID: "6d186303-14bd-4046-aefc-0eece7338db6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:13:00 crc kubenswrapper[4769]: I1125 10:13:00.155597 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6dn4f\" (UniqueName: \"kubernetes.io/projected/6d186303-14bd-4046-aefc-0eece7338db6-kube-api-access-6dn4f\") on node \"crc\" DevicePath \"\"" Nov 25 10:13:00 crc kubenswrapper[4769]: I1125 10:13:00.155661 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6d186303-14bd-4046-aefc-0eece7338db6-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:13:00 crc kubenswrapper[4769]: I1125 10:13:00.155729 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6d186303-14bd-4046-aefc-0eece7338db6-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:13:00 crc kubenswrapper[4769]: I1125 10:13:00.464433 4769 generic.go:334] "Generic (PLEG): container finished" podID="6d186303-14bd-4046-aefc-0eece7338db6" containerID="ac19e02f5f9d5a5c633217b874840efd58101237929a9fdadd02bdecb83aa86c" exitCode=0 Nov 25 10:13:00 crc kubenswrapper[4769]: I1125 10:13:00.464483 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tm6zn" event={"ID":"6d186303-14bd-4046-aefc-0eece7338db6","Type":"ContainerDied","Data":"ac19e02f5f9d5a5c633217b874840efd58101237929a9fdadd02bdecb83aa86c"} Nov 25 10:13:00 crc kubenswrapper[4769]: I1125 10:13:00.464513 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tm6zn" event={"ID":"6d186303-14bd-4046-aefc-0eece7338db6","Type":"ContainerDied","Data":"a03951d6bcc893fbde955942c3b054f366249ac918dd9674e353324d531ce517"} Nov 25 10:13:00 crc kubenswrapper[4769]: I1125 10:13:00.464534 4769 scope.go:117] "RemoveContainer" containerID="ac19e02f5f9d5a5c633217b874840efd58101237929a9fdadd02bdecb83aa86c" Nov 25 10:13:00 crc kubenswrapper[4769]: I1125 10:13:00.464596 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-tm6zn" Nov 25 10:13:00 crc kubenswrapper[4769]: I1125 10:13:00.511546 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tm6zn"] Nov 25 10:13:00 crc kubenswrapper[4769]: I1125 10:13:00.522488 4769 scope.go:117] "RemoveContainer" containerID="3d4221303053e3db612c6348e23fe6f853b5d20b116a52f820cfa10b5d44d372" Nov 25 10:13:00 crc kubenswrapper[4769]: I1125 10:13:00.526882 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-tm6zn"] Nov 25 10:13:00 crc kubenswrapper[4769]: I1125 10:13:00.565596 4769 scope.go:117] "RemoveContainer" containerID="f19c92d4927d92eb8f540f5a6180358d1ee1d828ef3122b5b9866beea1577f49" Nov 25 10:13:00 crc kubenswrapper[4769]: I1125 10:13:00.630067 4769 scope.go:117] "RemoveContainer" containerID="ac19e02f5f9d5a5c633217b874840efd58101237929a9fdadd02bdecb83aa86c" Nov 25 10:13:00 crc kubenswrapper[4769]: E1125 10:13:00.630772 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ac19e02f5f9d5a5c633217b874840efd58101237929a9fdadd02bdecb83aa86c\": container with ID starting with ac19e02f5f9d5a5c633217b874840efd58101237929a9fdadd02bdecb83aa86c not found: ID does not exist" containerID="ac19e02f5f9d5a5c633217b874840efd58101237929a9fdadd02bdecb83aa86c" Nov 25 10:13:00 crc kubenswrapper[4769]: I1125 10:13:00.630831 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac19e02f5f9d5a5c633217b874840efd58101237929a9fdadd02bdecb83aa86c"} err="failed to get container status \"ac19e02f5f9d5a5c633217b874840efd58101237929a9fdadd02bdecb83aa86c\": rpc error: code = NotFound desc = could not find container \"ac19e02f5f9d5a5c633217b874840efd58101237929a9fdadd02bdecb83aa86c\": container with ID starting with ac19e02f5f9d5a5c633217b874840efd58101237929a9fdadd02bdecb83aa86c not found: ID does not exist" Nov 25 10:13:00 crc kubenswrapper[4769]: I1125 10:13:00.630889 4769 scope.go:117] "RemoveContainer" containerID="3d4221303053e3db612c6348e23fe6f853b5d20b116a52f820cfa10b5d44d372" Nov 25 10:13:00 crc kubenswrapper[4769]: E1125 10:13:00.631953 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3d4221303053e3db612c6348e23fe6f853b5d20b116a52f820cfa10b5d44d372\": container with ID starting with 3d4221303053e3db612c6348e23fe6f853b5d20b116a52f820cfa10b5d44d372 not found: ID does not exist" containerID="3d4221303053e3db612c6348e23fe6f853b5d20b116a52f820cfa10b5d44d372" Nov 25 10:13:00 crc kubenswrapper[4769]: I1125 10:13:00.632129 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3d4221303053e3db612c6348e23fe6f853b5d20b116a52f820cfa10b5d44d372"} err="failed to get container status \"3d4221303053e3db612c6348e23fe6f853b5d20b116a52f820cfa10b5d44d372\": rpc error: code = NotFound desc = could not find container \"3d4221303053e3db612c6348e23fe6f853b5d20b116a52f820cfa10b5d44d372\": container with ID starting with 3d4221303053e3db612c6348e23fe6f853b5d20b116a52f820cfa10b5d44d372 not found: ID does not exist" Nov 25 10:13:00 crc kubenswrapper[4769]: I1125 10:13:00.632284 4769 scope.go:117] "RemoveContainer" containerID="f19c92d4927d92eb8f540f5a6180358d1ee1d828ef3122b5b9866beea1577f49" Nov 25 10:13:00 crc kubenswrapper[4769]: E1125 10:13:00.633042 4769 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"f19c92d4927d92eb8f540f5a6180358d1ee1d828ef3122b5b9866beea1577f49\": container with ID starting with f19c92d4927d92eb8f540f5a6180358d1ee1d828ef3122b5b9866beea1577f49 not found: ID does not exist" containerID="f19c92d4927d92eb8f540f5a6180358d1ee1d828ef3122b5b9866beea1577f49" Nov 25 10:13:00 crc kubenswrapper[4769]: I1125 10:13:00.633095 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f19c92d4927d92eb8f540f5a6180358d1ee1d828ef3122b5b9866beea1577f49"} err="failed to get container status \"f19c92d4927d92eb8f540f5a6180358d1ee1d828ef3122b5b9866beea1577f49\": rpc error: code = NotFound desc = could not find container \"f19c92d4927d92eb8f540f5a6180358d1ee1d828ef3122b5b9866beea1577f49\": container with ID starting with f19c92d4927d92eb8f540f5a6180358d1ee1d828ef3122b5b9866beea1577f49 not found: ID does not exist" Nov 25 10:13:02 crc kubenswrapper[4769]: I1125 10:13:02.264136 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6d186303-14bd-4046-aefc-0eece7338db6" path="/var/lib/kubelet/pods/6d186303-14bd-4046-aefc-0eece7338db6/volumes" Nov 25 10:13:05 crc kubenswrapper[4769]: I1125 10:13:05.237908 4769 scope.go:117] "RemoveContainer" containerID="19d7e4719048ead4c6811e0bf09eaeb04f5d9c0b0e2c91bc113c457c1276b7a7" Nov 25 10:13:05 crc kubenswrapper[4769]: E1125 10:13:05.239138 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:13:17 crc kubenswrapper[4769]: I1125 10:13:17.237935 4769 scope.go:117] "RemoveContainer" containerID="19d7e4719048ead4c6811e0bf09eaeb04f5d9c0b0e2c91bc113c457c1276b7a7" Nov 25 10:13:17 crc kubenswrapper[4769]: E1125 10:13:17.239519 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:13:29 crc kubenswrapper[4769]: I1125 10:13:29.239504 4769 scope.go:117] "RemoveContainer" containerID="19d7e4719048ead4c6811e0bf09eaeb04f5d9c0b0e2c91bc113c457c1276b7a7" Nov 25 10:13:29 crc kubenswrapper[4769]: E1125 10:13:29.242852 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:13:37 crc kubenswrapper[4769]: I1125 10:13:37.565829 4769 patch_prober.go:28] interesting pod/logging-loki-gateway-5bdd8fd454-klwtc container/opa namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.79:8083/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while 
awaiting headers)" start-of-body= Nov 25 10:13:37 crc kubenswrapper[4769]: I1125 10:13:37.566794 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-5bdd8fd454-klwtc" podUID="f535e254-5602-4794-9f47-e9bb2c1454b2" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.79:8083/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 25 10:13:38 crc kubenswrapper[4769]: I1125 10:13:38.501619 4769 scope.go:117] "RemoveContainer" containerID="9faac2f35eb8dd5f4919645703894574f6367d783e430dda2b8f706433016f18" Nov 25 10:13:40 crc kubenswrapper[4769]: I1125 10:13:40.237904 4769 scope.go:117] "RemoveContainer" containerID="19d7e4719048ead4c6811e0bf09eaeb04f5d9c0b0e2c91bc113c457c1276b7a7" Nov 25 10:13:40 crc kubenswrapper[4769]: E1125 10:13:40.239408 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:13:43 crc kubenswrapper[4769]: I1125 10:13:43.348164 4769 scope.go:117] "RemoveContainer" containerID="6e87b8e8e24fee05ec911efe798d5d590b46bd2fd850b4c0f1cd1a9ff1457c3d" Nov 25 10:13:43 crc kubenswrapper[4769]: I1125 10:13:43.409938 4769 scope.go:117] "RemoveContainer" containerID="43b0c1f50714851eab120856f7380219c1d5f23176617c933003637600370eb3" Nov 25 10:13:43 crc kubenswrapper[4769]: I1125 10:13:43.483891 4769 scope.go:117] "RemoveContainer" containerID="4b84b1aa69963dad2fd6727776c50ccd8e6eb53f4ef4e9a309bc9059cadc7147" Nov 25 10:13:43 crc kubenswrapper[4769]: I1125 10:13:43.526187 4769 scope.go:117] "RemoveContainer" containerID="65e0ebd4b0b43e9d547b1935266f9949553d882d7a5139aa2a766a8c369b0e05" Nov 25 10:13:43 crc kubenswrapper[4769]: I1125 10:13:43.577574 4769 scope.go:117] "RemoveContainer" containerID="b29fbee263efb754baf1bfbecd2cbb37e73412708dcac6dfb72529016552877d" Nov 25 10:13:43 crc kubenswrapper[4769]: I1125 10:13:43.653953 4769 scope.go:117] "RemoveContainer" containerID="fe8db9b6bad2ed958730351d2ead96e4d5954d2392ae06cf9cc0244d66f2b31c" Nov 25 10:13:43 crc kubenswrapper[4769]: I1125 10:13:43.712242 4769 scope.go:117] "RemoveContainer" containerID="35177ce8beaa44ed1615ea137ad50b003688350d023d046b8c6bf95e65b95264" Nov 25 10:13:51 crc kubenswrapper[4769]: I1125 10:13:51.238289 4769 scope.go:117] "RemoveContainer" containerID="19d7e4719048ead4c6811e0bf09eaeb04f5d9c0b0e2c91bc113c457c1276b7a7" Nov 25 10:13:51 crc kubenswrapper[4769]: E1125 10:13:51.239693 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:14:04 crc kubenswrapper[4769]: I1125 10:14:04.238286 4769 scope.go:117] "RemoveContainer" containerID="19d7e4719048ead4c6811e0bf09eaeb04f5d9c0b0e2c91bc113c457c1276b7a7" Nov 25 10:14:04 crc kubenswrapper[4769]: E1125 10:14:04.239336 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:14:16 crc kubenswrapper[4769]: I1125 10:14:16.238892 4769 scope.go:117] "RemoveContainer" containerID="19d7e4719048ead4c6811e0bf09eaeb04f5d9c0b0e2c91bc113c457c1276b7a7" Nov 25 10:14:16 crc kubenswrapper[4769]: E1125 10:14:16.240660 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:14:31 crc kubenswrapper[4769]: I1125 10:14:31.238522 4769 scope.go:117] "RemoveContainer" containerID="19d7e4719048ead4c6811e0bf09eaeb04f5d9c0b0e2c91bc113c457c1276b7a7" Nov 25 10:14:31 crc kubenswrapper[4769]: E1125 10:14:31.241129 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:14:43 crc kubenswrapper[4769]: I1125 10:14:43.237835 4769 scope.go:117] "RemoveContainer" containerID="19d7e4719048ead4c6811e0bf09eaeb04f5d9c0b0e2c91bc113c457c1276b7a7" Nov 25 10:14:43 crc kubenswrapper[4769]: E1125 10:14:43.241039 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:14:44 crc kubenswrapper[4769]: I1125 10:14:44.017663 4769 scope.go:117] "RemoveContainer" containerID="b5b8d8c06083bbd215e2b4f4292494655933053fd68ab294674ab76522deebbe" Nov 25 10:14:44 crc kubenswrapper[4769]: I1125 10:14:44.063241 4769 scope.go:117] "RemoveContainer" containerID="f1198c60590d8d1f401e3318ddb6cc136ac2779af8cfac1481eead81cbdbc938" Nov 25 10:14:44 crc kubenswrapper[4769]: I1125 10:14:44.091704 4769 scope.go:117] "RemoveContainer" containerID="3e579c48049c0f0e7ca98e7128a5bdb2b3249d1ee77c4fdef2890b33b1b008e6" Nov 25 10:14:44 crc kubenswrapper[4769]: I1125 10:14:44.122527 4769 scope.go:117] "RemoveContainer" containerID="6fb267bebeeda4111c0eb8d9768069b47226b6ce6d86b53830e2f5d2310a4f58" Nov 25 10:14:54 crc kubenswrapper[4769]: I1125 10:14:54.238334 4769 scope.go:117] "RemoveContainer" containerID="19d7e4719048ead4c6811e0bf09eaeb04f5d9c0b0e2c91bc113c457c1276b7a7" Nov 25 10:14:54 crc kubenswrapper[4769]: E1125 10:14:54.239915 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
Nov 25 10:15:00 crc kubenswrapper[4769]: I1125 10:15:00.187036 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401095-9phns"]
Nov 25 10:15:00 crc kubenswrapper[4769]: E1125 10:15:00.189635 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d186303-14bd-4046-aefc-0eece7338db6" containerName="extract-content"
Nov 25 10:15:00 crc kubenswrapper[4769]: I1125 10:15:00.189772 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d186303-14bd-4046-aefc-0eece7338db6" containerName="extract-content"
Nov 25 10:15:00 crc kubenswrapper[4769]: E1125 10:15:00.189873 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0a9ab92-738a-4b49-962a-c1225d2cd761" containerName="extract-utilities"
Nov 25 10:15:00 crc kubenswrapper[4769]: I1125 10:15:00.189951 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0a9ab92-738a-4b49-962a-c1225d2cd761" containerName="extract-utilities"
Nov 25 10:15:00 crc kubenswrapper[4769]: E1125 10:15:00.190060 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0a9ab92-738a-4b49-962a-c1225d2cd761" containerName="extract-content"
Nov 25 10:15:00 crc kubenswrapper[4769]: I1125 10:15:00.190139 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0a9ab92-738a-4b49-962a-c1225d2cd761" containerName="extract-content"
Nov 25 10:15:00 crc kubenswrapper[4769]: E1125 10:15:00.190225 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d186303-14bd-4046-aefc-0eece7338db6" containerName="registry-server"
Nov 25 10:15:00 crc kubenswrapper[4769]: I1125 10:15:00.190298 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d186303-14bd-4046-aefc-0eece7338db6" containerName="registry-server"
Nov 25 10:15:00 crc kubenswrapper[4769]: E1125 10:15:00.190373 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d186303-14bd-4046-aefc-0eece7338db6" containerName="extract-utilities"
Nov 25 10:15:00 crc kubenswrapper[4769]: I1125 10:15:00.190436 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d186303-14bd-4046-aefc-0eece7338db6" containerName="extract-utilities"
Nov 25 10:15:00 crc kubenswrapper[4769]: E1125 10:15:00.191898 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0a9ab92-738a-4b49-962a-c1225d2cd761" containerName="registry-server"
Nov 25 10:15:00 crc kubenswrapper[4769]: I1125 10:15:00.192018 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0a9ab92-738a-4b49-962a-c1225d2cd761" containerName="registry-server"
Nov 25 10:15:00 crc kubenswrapper[4769]: I1125 10:15:00.192438 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d186303-14bd-4046-aefc-0eece7338db6" containerName="registry-server"
Nov 25 10:15:00 crc kubenswrapper[4769]: I1125 10:15:00.192573 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0a9ab92-738a-4b49-962a-c1225d2cd761" containerName="registry-server"
Nov 25 10:15:00 crc kubenswrapper[4769]: I1125 10:15:00.193777 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-9phns"
Nov 25 10:15:00 crc kubenswrapper[4769]: I1125 10:15:00.195997 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 25 10:15:00 crc kubenswrapper[4769]: I1125 10:15:00.196470 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 25 10:15:00 crc kubenswrapper[4769]: I1125 10:15:00.198877 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401095-9phns"]
Nov 25 10:15:00 crc kubenswrapper[4769]: I1125 10:15:00.320475 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/bef55661-600d-4213-8b7a-d1ac8fec5b6b-secret-volume\") pod \"collect-profiles-29401095-9phns\" (UID: \"bef55661-600d-4213-8b7a-d1ac8fec5b6b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-9phns"
Nov 25 10:15:00 crc kubenswrapper[4769]: I1125 10:15:00.320626 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bef55661-600d-4213-8b7a-d1ac8fec5b6b-config-volume\") pod \"collect-profiles-29401095-9phns\" (UID: \"bef55661-600d-4213-8b7a-d1ac8fec5b6b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-9phns"
Nov 25 10:15:00 crc kubenswrapper[4769]: I1125 10:15:00.320649 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hkd8v\" (UniqueName: \"kubernetes.io/projected/bef55661-600d-4213-8b7a-d1ac8fec5b6b-kube-api-access-hkd8v\") pod \"collect-profiles-29401095-9phns\" (UID: \"bef55661-600d-4213-8b7a-d1ac8fec5b6b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-9phns"
Nov 25 10:15:00 crc kubenswrapper[4769]: I1125 10:15:00.423385 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bef55661-600d-4213-8b7a-d1ac8fec5b6b-config-volume\") pod \"collect-profiles-29401095-9phns\" (UID: \"bef55661-600d-4213-8b7a-d1ac8fec5b6b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-9phns"
Nov 25 10:15:00 crc kubenswrapper[4769]: I1125 10:15:00.423443 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hkd8v\" (UniqueName: \"kubernetes.io/projected/bef55661-600d-4213-8b7a-d1ac8fec5b6b-kube-api-access-hkd8v\") pod \"collect-profiles-29401095-9phns\" (UID: \"bef55661-600d-4213-8b7a-d1ac8fec5b6b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-9phns"
Nov 25 10:15:00 crc kubenswrapper[4769]: I1125 10:15:00.423782 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/bef55661-600d-4213-8b7a-d1ac8fec5b6b-secret-volume\") pod \"collect-profiles-29401095-9phns\" (UID: \"bef55661-600d-4213-8b7a-d1ac8fec5b6b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-9phns"
Nov 25 10:15:00 crc kubenswrapper[4769]: I1125 10:15:00.424687 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bef55661-600d-4213-8b7a-d1ac8fec5b6b-config-volume\") pod \"collect-profiles-29401095-9phns\" (UID: \"bef55661-600d-4213-8b7a-d1ac8fec5b6b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-9phns"
Nov 25 10:15:00 crc kubenswrapper[4769]: I1125 10:15:00.432666 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/bef55661-600d-4213-8b7a-d1ac8fec5b6b-secret-volume\") pod \"collect-profiles-29401095-9phns\" (UID: \"bef55661-600d-4213-8b7a-d1ac8fec5b6b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-9phns"
Nov 25 10:15:00 crc kubenswrapper[4769]: I1125 10:15:00.455927 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hkd8v\" (UniqueName: \"kubernetes.io/projected/bef55661-600d-4213-8b7a-d1ac8fec5b6b-kube-api-access-hkd8v\") pod \"collect-profiles-29401095-9phns\" (UID: \"bef55661-600d-4213-8b7a-d1ac8fec5b6b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-9phns"
Nov 25 10:15:00 crc kubenswrapper[4769]: I1125 10:15:00.526116 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-9phns"
Nov 25 10:15:01 crc kubenswrapper[4769]: I1125 10:15:01.025910 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401095-9phns"]
Nov 25 10:15:01 crc kubenswrapper[4769]: I1125 10:15:01.465499 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-9phns" event={"ID":"bef55661-600d-4213-8b7a-d1ac8fec5b6b","Type":"ContainerStarted","Data":"cba2d6c2679932ed0a4e4cc0bc3ec8db000eb1206ecba9d90ac273d5742b29c6"}
Nov 25 10:15:01 crc kubenswrapper[4769]: I1125 10:15:01.466073 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-9phns" event={"ID":"bef55661-600d-4213-8b7a-d1ac8fec5b6b","Type":"ContainerStarted","Data":"7e1c18320f6f3dd208ce2be7ffd3a03e6cc125db6b1accc3e1775cbbac35acca"}
Nov 25 10:15:01 crc kubenswrapper[4769]: I1125 10:15:01.495288 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-9phns" podStartSLOduration=1.495265729 podStartE2EDuration="1.495265729s" podCreationTimestamp="2025-11-25 10:15:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:15:01.479910696 +0000 UTC m=+1850.064883009" watchObservedRunningTime="2025-11-25 10:15:01.495265729 +0000 UTC m=+1850.080238042"
Nov 25 10:15:02 crc kubenswrapper[4769]: I1125 10:15:02.479581 4769 generic.go:334] "Generic (PLEG): container finished" podID="bef55661-600d-4213-8b7a-d1ac8fec5b6b" containerID="cba2d6c2679932ed0a4e4cc0bc3ec8db000eb1206ecba9d90ac273d5742b29c6" exitCode=0
Nov 25 10:15:02 crc kubenswrapper[4769]: I1125 10:15:02.479642 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-9phns" event={"ID":"bef55661-600d-4213-8b7a-d1ac8fec5b6b","Type":"ContainerDied","Data":"cba2d6c2679932ed0a4e4cc0bc3ec8db000eb1206ecba9d90ac273d5742b29c6"}
Nov 25 10:15:03 crc kubenswrapper[4769]: I1125 10:15:03.900337 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-9phns"
Nov 25 10:15:04 crc kubenswrapper[4769]: I1125 10:15:04.038153 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hkd8v\" (UniqueName: \"kubernetes.io/projected/bef55661-600d-4213-8b7a-d1ac8fec5b6b-kube-api-access-hkd8v\") pod \"bef55661-600d-4213-8b7a-d1ac8fec5b6b\" (UID: \"bef55661-600d-4213-8b7a-d1ac8fec5b6b\") "
Nov 25 10:15:04 crc kubenswrapper[4769]: I1125 10:15:04.038305 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bef55661-600d-4213-8b7a-d1ac8fec5b6b-config-volume\") pod \"bef55661-600d-4213-8b7a-d1ac8fec5b6b\" (UID: \"bef55661-600d-4213-8b7a-d1ac8fec5b6b\") "
Nov 25 10:15:04 crc kubenswrapper[4769]: I1125 10:15:04.038564 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/bef55661-600d-4213-8b7a-d1ac8fec5b6b-secret-volume\") pod \"bef55661-600d-4213-8b7a-d1ac8fec5b6b\" (UID: \"bef55661-600d-4213-8b7a-d1ac8fec5b6b\") "
Nov 25 10:15:04 crc kubenswrapper[4769]: I1125 10:15:04.042304 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bef55661-600d-4213-8b7a-d1ac8fec5b6b-config-volume" (OuterVolumeSpecName: "config-volume") pod "bef55661-600d-4213-8b7a-d1ac8fec5b6b" (UID: "bef55661-600d-4213-8b7a-d1ac8fec5b6b"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:15:04 crc kubenswrapper[4769]: I1125 10:15:04.044586 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bef55661-600d-4213-8b7a-d1ac8fec5b6b-kube-api-access-hkd8v" (OuterVolumeSpecName: "kube-api-access-hkd8v") pod "bef55661-600d-4213-8b7a-d1ac8fec5b6b" (UID: "bef55661-600d-4213-8b7a-d1ac8fec5b6b"). InnerVolumeSpecName "kube-api-access-hkd8v". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:15:04 crc kubenswrapper[4769]: I1125 10:15:04.051185 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bef55661-600d-4213-8b7a-d1ac8fec5b6b-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "bef55661-600d-4213-8b7a-d1ac8fec5b6b" (UID: "bef55661-600d-4213-8b7a-d1ac8fec5b6b"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:15:04 crc kubenswrapper[4769]: I1125 10:15:04.142412 4769 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bef55661-600d-4213-8b7a-d1ac8fec5b6b-config-volume\") on node \"crc\" DevicePath \"\""
Nov 25 10:15:04 crc kubenswrapper[4769]: I1125 10:15:04.142815 4769 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/bef55661-600d-4213-8b7a-d1ac8fec5b6b-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 25 10:15:04 crc kubenswrapper[4769]: I1125 10:15:04.142829 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hkd8v\" (UniqueName: \"kubernetes.io/projected/bef55661-600d-4213-8b7a-d1ac8fec5b6b-kube-api-access-hkd8v\") on node \"crc\" DevicePath \"\""
Nov 25 10:15:04 crc kubenswrapper[4769]: I1125 10:15:04.515213 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-9phns" event={"ID":"bef55661-600d-4213-8b7a-d1ac8fec5b6b","Type":"ContainerDied","Data":"7e1c18320f6f3dd208ce2be7ffd3a03e6cc125db6b1accc3e1775cbbac35acca"}
Nov 25 10:15:04 crc kubenswrapper[4769]: I1125 10:15:04.515268 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7e1c18320f6f3dd208ce2be7ffd3a03e6cc125db6b1accc3e1775cbbac35acca"
Nov 25 10:15:04 crc kubenswrapper[4769]: I1125 10:15:04.515349 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-9phns"
Nov 25 10:15:09 crc kubenswrapper[4769]: I1125 10:15:09.237521 4769 scope.go:117] "RemoveContainer" containerID="19d7e4719048ead4c6811e0bf09eaeb04f5d9c0b0e2c91bc113c457c1276b7a7"
Nov 25 10:15:09 crc kubenswrapper[4769]: E1125 10:15:09.238727 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256"
Nov 25 10:15:22 crc kubenswrapper[4769]: I1125 10:15:22.252289 4769 scope.go:117] "RemoveContainer" containerID="19d7e4719048ead4c6811e0bf09eaeb04f5d9c0b0e2c91bc113c457c1276b7a7"
Nov 25 10:15:22 crc kubenswrapper[4769]: E1125 10:15:22.253765 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256"
Nov 25 10:15:27 crc kubenswrapper[4769]: I1125 10:15:27.074609 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-b404-account-create-58x4q"]
Nov 25 10:15:27 crc kubenswrapper[4769]: I1125 10:15:27.102347 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-kgxrr"]
Nov 25 10:15:27 crc kubenswrapper[4769]: I1125 10:15:27.144384 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-0af1-account-create-dkkq6"]
Nov 25 10:15:27 crc kubenswrapper[4769]:
Nov 25 10:15:27 crc kubenswrapper[4769]: I1125 10:15:27.160248 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-b404-account-create-58x4q"]
Nov 25 10:15:27 crc kubenswrapper[4769]: I1125 10:15:27.178233 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-kgxrr"]
Nov 25 10:15:27 crc kubenswrapper[4769]: I1125 10:15:27.196070 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-0af1-account-create-dkkq6"]
Nov 25 10:15:28 crc kubenswrapper[4769]: I1125 10:15:28.257234 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="53cae36d-5253-41e8-b6a8-b716251729b6" path="/var/lib/kubelet/pods/53cae36d-5253-41e8-b6a8-b716251729b6/volumes"
Nov 25 10:15:28 crc kubenswrapper[4769]: I1125 10:15:28.260436 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d226b614-be9f-4d60-a7b4-f6ce81a8753f" path="/var/lib/kubelet/pods/d226b614-be9f-4d60-a7b4-f6ce81a8753f/volumes"
Nov 25 10:15:28 crc kubenswrapper[4769]: I1125 10:15:28.261027 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dbee15e7-80d1-4d49-8da5-ea268221ed4f" path="/var/lib/kubelet/pods/dbee15e7-80d1-4d49-8da5-ea268221ed4f/volumes"
Nov 25 10:15:30 crc kubenswrapper[4769]: I1125 10:15:30.049170 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-2g226"]
Nov 25 10:15:30 crc kubenswrapper[4769]: I1125 10:15:30.062338 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-966e-account-create-cd5fr"]
Nov 25 10:15:30 crc kubenswrapper[4769]: I1125 10:15:30.072307 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-2g226"]
Nov 25 10:15:30 crc kubenswrapper[4769]: I1125 10:15:30.082816 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-966e-account-create-cd5fr"]
Nov 25 10:15:30 crc kubenswrapper[4769]: I1125 10:15:30.264176 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b4286152-9aa2-4d10-94b0-0676b434dc03" path="/var/lib/kubelet/pods/b4286152-9aa2-4d10-94b0-0676b434dc03/volumes"
Nov 25 10:15:30 crc kubenswrapper[4769]: I1125 10:15:30.267007 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e2f8ec92-08ca-40fd-b130-33cbd71ab39b" path="/var/lib/kubelet/pods/e2f8ec92-08ca-40fd-b130-33cbd71ab39b/volumes"
Nov 25 10:15:32 crc kubenswrapper[4769]: I1125 10:15:32.062647 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-cjxql"]
Nov 25 10:15:32 crc kubenswrapper[4769]: I1125 10:15:32.078377 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-24df-account-create-vj7tp"]
Nov 25 10:15:32 crc kubenswrapper[4769]: I1125 10:15:32.090413 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-cjxql"]
Nov 25 10:15:32 crc kubenswrapper[4769]: I1125 10:15:32.100212 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-24df-account-create-vj7tp"]
Nov 25 10:15:32 crc kubenswrapper[4769]: I1125 10:15:32.255584 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="28837799-87a3-4a15-aaeb-91c949f21ef5" path="/var/lib/kubelet/pods/28837799-87a3-4a15-aaeb-91c949f21ef5/volumes"
Nov 25 10:15:32 crc kubenswrapper[4769]: I1125 10:15:32.256454 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7d7f8889-8ac3-4fbd-83be-743fd53cea7a" path="/var/lib/kubelet/pods/7d7f8889-8ac3-4fbd-83be-743fd53cea7a/volumes"
Nov 25 10:15:33 crc kubenswrapper[4769]: I1125 10:15:33.031847 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-rlvtg"]
Nov 25 10:15:33 crc kubenswrapper[4769]: I1125 10:15:33.040842 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-rlvtg"]
Nov 25 10:15:34 crc kubenswrapper[4769]: I1125 10:15:34.257223 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="783756da-1dfb-49b3-9472-d33844dffc95" path="/var/lib/kubelet/pods/783756da-1dfb-49b3-9472-d33844dffc95/volumes"
Nov 25 10:15:36 crc kubenswrapper[4769]: I1125 10:15:36.238000 4769 scope.go:117] "RemoveContainer" containerID="19d7e4719048ead4c6811e0bf09eaeb04f5d9c0b0e2c91bc113c457c1276b7a7"
Nov 25 10:15:37 crc kubenswrapper[4769]: I1125 10:15:37.024998 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" event={"ID":"d58c71b5-5dc4-45c1-9b58-9740a35d2256","Type":"ContainerStarted","Data":"9806b9991b42941f4f09cbc1245d89fa1181b893dd6fc482374becd2c41fa1bf"}
Nov 25 10:15:37 crc kubenswrapper[4769]: I1125 10:15:37.067780 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-9010-account-create-8r524"]
Nov 25 10:15:37 crc kubenswrapper[4769]: I1125 10:15:37.094226 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-xxdkp"]
Nov 25 10:15:37 crc kubenswrapper[4769]: I1125 10:15:37.125287 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-9010-account-create-8r524"]
Nov 25 10:15:37 crc kubenswrapper[4769]: I1125 10:15:37.139653 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-xxdkp"]
Nov 25 10:15:38 crc kubenswrapper[4769]: I1125 10:15:38.263679 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3bc94902-4625-4198-a28e-a52f51888eea" path="/var/lib/kubelet/pods/3bc94902-4625-4198-a28e-a52f51888eea/volumes"
Nov 25 10:15:38 crc kubenswrapper[4769]: I1125 10:15:38.267023 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9ff8162a-1d7e-4415-b231-5d09857454cb" path="/var/lib/kubelet/pods/9ff8162a-1d7e-4415-b231-5d09857454cb/volumes"
Nov 25 10:15:41 crc kubenswrapper[4769]: I1125 10:15:41.049745 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-q5w5g"]
Nov 25 10:15:41 crc kubenswrapper[4769]: I1125 10:15:41.066543 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-q5w5g"]
Nov 25 10:15:42 crc kubenswrapper[4769]: I1125 10:15:42.049521 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-498c-account-create-4dtkm"]
Nov 25 10:15:42 crc kubenswrapper[4769]: I1125 10:15:42.068724 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-498c-account-create-4dtkm"]
Nov 25 10:15:42 crc kubenswrapper[4769]: I1125 10:15:42.250804 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8c0172ef-616f-4636-b51f-4e534a850822" path="/var/lib/kubelet/pods/8c0172ef-616f-4636-b51f-4e534a850822/volumes"
Nov 25 10:15:42 crc kubenswrapper[4769]: I1125 10:15:42.255144 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c5155b12-87cf-4695-a19c-d54322926320" path="/var/lib/kubelet/pods/c5155b12-87cf-4695-a19c-d54322926320/volumes"
Nov 25 10:15:44 crc kubenswrapper[4769]: I1125 10:15:44.237559 4769 scope.go:117] "RemoveContainer" containerID="511333d30b3ff933fa85b13db5ff01f2938bfa2ad4f8a9b464f104c151252bb1"
Nov 25 10:15:44 crc kubenswrapper[4769]: I1125 10:15:44.295626 4769 scope.go:117] "RemoveContainer" containerID="c236b72e809a4e5e305f9a7b1ec56438a38e8b53274cf98e0b9321ef3bf1b890"
Nov 25 10:15:44 crc kubenswrapper[4769]: I1125 10:15:44.356447 4769 scope.go:117] "RemoveContainer" containerID="0c800b2a9cf0548231ba249b6b44ff268432ab40918388b26d163be6f959e5a8"
Nov 25 10:15:44 crc kubenswrapper[4769]: I1125 10:15:44.381012 4769 scope.go:117] "RemoveContainer" containerID="db6cf4b655ec479d27626be2cd2afbc3295defb227fcf96069846346f0c90ca6"
Nov 25 10:15:44 crc kubenswrapper[4769]: I1125 10:15:44.441470 4769 scope.go:117] "RemoveContainer" containerID="e8b88b86a1cb53cee34b690bf43238a07397b0910e4170619117262274535577"
Nov 25 10:15:44 crc kubenswrapper[4769]: I1125 10:15:44.486242 4769 scope.go:117] "RemoveContainer" containerID="7c481239b8be7e86fff05999c53aecfeb57843ac547d565d8963b89952d03fe7"
Nov 25 10:15:44 crc kubenswrapper[4769]: I1125 10:15:44.555669 4769 scope.go:117] "RemoveContainer" containerID="e79bcdec7e358a37742ab3f695d3004794a348553ffcd822a206a99efaa99371"
Nov 25 10:15:44 crc kubenswrapper[4769]: I1125 10:15:44.590954 4769 scope.go:117] "RemoveContainer" containerID="5637982e602a3100947e08914ec4192f6b6acced023664555932a84db33bcc62"
Nov 25 10:15:44 crc kubenswrapper[4769]: I1125 10:15:44.618322 4769 scope.go:117] "RemoveContainer" containerID="9c99c28d76e8806f8744965d1d05550b064ba4de5eeae673f98153124818e9a7"
Nov 25 10:15:44 crc kubenswrapper[4769]: I1125 10:15:44.648593 4769 scope.go:117] "RemoveContainer" containerID="a30a35cfbddd31cb6341859fa87f334fc55443a6ca8812377401ce333e8e0c90"
Nov 25 10:15:44 crc kubenswrapper[4769]: I1125 10:15:44.680696 4769 scope.go:117] "RemoveContainer" containerID="2cb2b538777f7251522fd0b7f4abf05485ab29a97c999e901c40d54906640f7e"
Nov 25 10:15:44 crc kubenswrapper[4769]: I1125 10:15:44.714893 4769 scope.go:117] "RemoveContainer" containerID="945cecfcbbaf9212d21bdec44eb73539b745cc966dcd1ec4383256a7d0c35c5c"
Nov 25 10:15:44 crc kubenswrapper[4769]: I1125 10:15:44.743935 4769 scope.go:117] "RemoveContainer" containerID="0737cf1e9f3c9914d55a1c16965fb73261025d22adb852ebf9bc02abf8feae61"
Nov 25 10:15:44 crc kubenswrapper[4769]: I1125 10:15:44.770621 4769 scope.go:117] "RemoveContainer" containerID="db03c10ecd567a8fa81fc8798c65b6894aae2cbacd881d33f3229c4043ad762e"
Nov 25 10:15:46 crc kubenswrapper[4769]: I1125 10:15:46.040809 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-create-kd9nw"]
Nov 25 10:15:46 crc kubenswrapper[4769]: I1125 10:15:46.054799 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-1a9a-account-create-v2rj6"]
Nov 25 10:15:46 crc kubenswrapper[4769]: I1125 10:15:46.075541 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-create-kd9nw"]
Nov 25 10:15:46 crc kubenswrapper[4769]: I1125 10:15:46.085049 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-8r5jb"]
Nov 25 10:15:46 crc kubenswrapper[4769]: I1125 10:15:46.093499 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-e28d-account-create-w5wr6"]
Nov 25 10:15:46 crc kubenswrapper[4769]: I1125 10:15:46.102401 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-e28d-account-create-w5wr6"]
pods=["openstack/cinder-dea0-account-create-8ns6l"] Nov 25 10:15:46 crc kubenswrapper[4769]: I1125 10:15:46.120714 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-8r5jb"] Nov 25 10:15:46 crc kubenswrapper[4769]: I1125 10:15:46.129425 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-1a9a-account-create-v2rj6"] Nov 25 10:15:46 crc kubenswrapper[4769]: I1125 10:15:46.138236 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-44vlp"] Nov 25 10:15:46 crc kubenswrapper[4769]: I1125 10:15:46.146184 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-dea0-account-create-8ns6l"] Nov 25 10:15:46 crc kubenswrapper[4769]: I1125 10:15:46.154135 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-44vlp"] Nov 25 10:15:46 crc kubenswrapper[4769]: I1125 10:15:46.264237 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="11fbb390-f40d-4098-adab-b13e53b51cc8" path="/var/lib/kubelet/pods/11fbb390-f40d-4098-adab-b13e53b51cc8/volumes" Nov 25 10:15:46 crc kubenswrapper[4769]: I1125 10:15:46.266001 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="192781af-b5a8-4804-b3a9-a53290328c8c" path="/var/lib/kubelet/pods/192781af-b5a8-4804-b3a9-a53290328c8c/volumes" Nov 25 10:15:46 crc kubenswrapper[4769]: I1125 10:15:46.267034 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25f32f1a-8af2-4632-8a1e-689576d2d17b" path="/var/lib/kubelet/pods/25f32f1a-8af2-4632-8a1e-689576d2d17b/volumes" Nov 25 10:15:46 crc kubenswrapper[4769]: I1125 10:15:46.269049 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="27cb33b2-18c8-4d25-a441-4bdc64d8bfde" path="/var/lib/kubelet/pods/27cb33b2-18c8-4d25-a441-4bdc64d8bfde/volumes" Nov 25 10:15:46 crc kubenswrapper[4769]: I1125 10:15:46.272829 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a4223d39-07d7-421b-b3b7-953758e77444" path="/var/lib/kubelet/pods/a4223d39-07d7-421b-b3b7-953758e77444/volumes" Nov 25 10:15:46 crc kubenswrapper[4769]: I1125 10:15:46.273711 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d2550677-acee-408a-86c9-149a068d10a1" path="/var/lib/kubelet/pods/d2550677-acee-408a-86c9-149a068d10a1/volumes" Nov 25 10:15:55 crc kubenswrapper[4769]: I1125 10:15:55.042824 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-kt2fl"] Nov 25 10:15:55 crc kubenswrapper[4769]: I1125 10:15:55.056567 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-kt2fl"] Nov 25 10:15:56 crc kubenswrapper[4769]: I1125 10:15:56.253345 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e5719295-a2f1-4d2f-a021-1da5411acbc8" path="/var/lib/kubelet/pods/e5719295-a2f1-4d2f-a021-1da5411acbc8/volumes" Nov 25 10:16:16 crc kubenswrapper[4769]: I1125 10:16:16.049008 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-jm2np"] Nov 25 10:16:16 crc kubenswrapper[4769]: I1125 10:16:16.068118 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-jm2np"] Nov 25 10:16:16 crc kubenswrapper[4769]: I1125 10:16:16.272820 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="28b97e59-606d-4810-bec5-cecbc1c691bc" path="/var/lib/kubelet/pods/28b97e59-606d-4810-bec5-cecbc1c691bc/volumes" Nov 25 10:16:22 crc kubenswrapper[4769]: I1125 
10:16:22.662357 4769 generic.go:334] "Generic (PLEG): container finished" podID="77bb348e-56c3-4597-917d-5d918bfad3ca" containerID="6e9ad158876d4e2310c510099565c3b8cb1b245f2fe131b5e336ba49467414e8" exitCode=0 Nov 25 10:16:22 crc kubenswrapper[4769]: I1125 10:16:22.662539 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dffv6" event={"ID":"77bb348e-56c3-4597-917d-5d918bfad3ca","Type":"ContainerDied","Data":"6e9ad158876d4e2310c510099565c3b8cb1b245f2fe131b5e336ba49467414e8"} Nov 25 10:16:24 crc kubenswrapper[4769]: I1125 10:16:24.216431 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dffv6" Nov 25 10:16:24 crc kubenswrapper[4769]: I1125 10:16:24.220506 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/77bb348e-56c3-4597-917d-5d918bfad3ca-inventory\") pod \"77bb348e-56c3-4597-917d-5d918bfad3ca\" (UID: \"77bb348e-56c3-4597-917d-5d918bfad3ca\") " Nov 25 10:16:24 crc kubenswrapper[4769]: I1125 10:16:24.220700 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/77bb348e-56c3-4597-917d-5d918bfad3ca-ssh-key\") pod \"77bb348e-56c3-4597-917d-5d918bfad3ca\" (UID: \"77bb348e-56c3-4597-917d-5d918bfad3ca\") " Nov 25 10:16:24 crc kubenswrapper[4769]: I1125 10:16:24.220954 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77bb348e-56c3-4597-917d-5d918bfad3ca-bootstrap-combined-ca-bundle\") pod \"77bb348e-56c3-4597-917d-5d918bfad3ca\" (UID: \"77bb348e-56c3-4597-917d-5d918bfad3ca\") " Nov 25 10:16:24 crc kubenswrapper[4769]: I1125 10:16:24.248816 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77bb348e-56c3-4597-917d-5d918bfad3ca-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "77bb348e-56c3-4597-917d-5d918bfad3ca" (UID: "77bb348e-56c3-4597-917d-5d918bfad3ca"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:16:24 crc kubenswrapper[4769]: I1125 10:16:24.266491 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77bb348e-56c3-4597-917d-5d918bfad3ca-inventory" (OuterVolumeSpecName: "inventory") pod "77bb348e-56c3-4597-917d-5d918bfad3ca" (UID: "77bb348e-56c3-4597-917d-5d918bfad3ca"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:16:24 crc kubenswrapper[4769]: I1125 10:16:24.270744 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77bb348e-56c3-4597-917d-5d918bfad3ca-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "77bb348e-56c3-4597-917d-5d918bfad3ca" (UID: "77bb348e-56c3-4597-917d-5d918bfad3ca"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:16:24 crc kubenswrapper[4769]: I1125 10:16:24.323794 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s6ftk\" (UniqueName: \"kubernetes.io/projected/77bb348e-56c3-4597-917d-5d918bfad3ca-kube-api-access-s6ftk\") pod \"77bb348e-56c3-4597-917d-5d918bfad3ca\" (UID: \"77bb348e-56c3-4597-917d-5d918bfad3ca\") " Nov 25 10:16:24 crc kubenswrapper[4769]: I1125 10:16:24.325126 4769 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77bb348e-56c3-4597-917d-5d918bfad3ca-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:16:24 crc kubenswrapper[4769]: I1125 10:16:24.325270 4769 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/77bb348e-56c3-4597-917d-5d918bfad3ca-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 10:16:24 crc kubenswrapper[4769]: I1125 10:16:24.325360 4769 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/77bb348e-56c3-4597-917d-5d918bfad3ca-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:16:24 crc kubenswrapper[4769]: I1125 10:16:24.328720 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/77bb348e-56c3-4597-917d-5d918bfad3ca-kube-api-access-s6ftk" (OuterVolumeSpecName: "kube-api-access-s6ftk") pod "77bb348e-56c3-4597-917d-5d918bfad3ca" (UID: "77bb348e-56c3-4597-917d-5d918bfad3ca"). InnerVolumeSpecName "kube-api-access-s6ftk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:16:24 crc kubenswrapper[4769]: I1125 10:16:24.427523 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s6ftk\" (UniqueName: \"kubernetes.io/projected/77bb348e-56c3-4597-917d-5d918bfad3ca-kube-api-access-s6ftk\") on node \"crc\" DevicePath \"\"" Nov 25 10:16:24 crc kubenswrapper[4769]: I1125 10:16:24.696887 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dffv6" event={"ID":"77bb348e-56c3-4597-917d-5d918bfad3ca","Type":"ContainerDied","Data":"cf6ad2b0956b399edf2aa176541819f8298c20cd5c6101a41a3959c917385558"} Nov 25 10:16:24 crc kubenswrapper[4769]: I1125 10:16:24.697364 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cf6ad2b0956b399edf2aa176541819f8298c20cd5c6101a41a3959c917385558" Nov 25 10:16:24 crc kubenswrapper[4769]: I1125 10:16:24.697080 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-dffv6" Nov 25 10:16:24 crc kubenswrapper[4769]: I1125 10:16:24.797858 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-xknzf"] Nov 25 10:16:24 crc kubenswrapper[4769]: E1125 10:16:24.798546 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77bb348e-56c3-4597-917d-5d918bfad3ca" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 25 10:16:24 crc kubenswrapper[4769]: I1125 10:16:24.798576 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="77bb348e-56c3-4597-917d-5d918bfad3ca" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 25 10:16:24 crc kubenswrapper[4769]: E1125 10:16:24.798616 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bef55661-600d-4213-8b7a-d1ac8fec5b6b" containerName="collect-profiles" Nov 25 10:16:24 crc kubenswrapper[4769]: I1125 10:16:24.798625 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="bef55661-600d-4213-8b7a-d1ac8fec5b6b" containerName="collect-profiles" Nov 25 10:16:24 crc kubenswrapper[4769]: I1125 10:16:24.798904 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="bef55661-600d-4213-8b7a-d1ac8fec5b6b" containerName="collect-profiles" Nov 25 10:16:24 crc kubenswrapper[4769]: I1125 10:16:24.798941 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="77bb348e-56c3-4597-917d-5d918bfad3ca" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 25 10:16:24 crc kubenswrapper[4769]: I1125 10:16:24.800123 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-xknzf" Nov 25 10:16:24 crc kubenswrapper[4769]: I1125 10:16:24.802471 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 10:16:24 crc kubenswrapper[4769]: I1125 10:16:24.803979 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:16:24 crc kubenswrapper[4769]: I1125 10:16:24.804060 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-skqtv" Nov 25 10:16:24 crc kubenswrapper[4769]: I1125 10:16:24.805234 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 10:16:24 crc kubenswrapper[4769]: I1125 10:16:24.811717 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-xknzf"] Nov 25 10:16:24 crc kubenswrapper[4769]: I1125 10:16:24.837894 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kqrr2\" (UniqueName: \"kubernetes.io/projected/d10dc8b2-044a-4829-b8bd-54559166b436-kube-api-access-kqrr2\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-xknzf\" (UID: \"d10dc8b2-044a-4829-b8bd-54559166b436\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-xknzf" Nov 25 10:16:24 crc kubenswrapper[4769]: I1125 10:16:24.838009 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d10dc8b2-044a-4829-b8bd-54559166b436-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-xknzf\" (UID: \"d10dc8b2-044a-4829-b8bd-54559166b436\") " 
pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-xknzf" Nov 25 10:16:24 crc kubenswrapper[4769]: I1125 10:16:24.838563 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d10dc8b2-044a-4829-b8bd-54559166b436-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-xknzf\" (UID: \"d10dc8b2-044a-4829-b8bd-54559166b436\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-xknzf" Nov 25 10:16:24 crc kubenswrapper[4769]: I1125 10:16:24.941100 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kqrr2\" (UniqueName: \"kubernetes.io/projected/d10dc8b2-044a-4829-b8bd-54559166b436-kube-api-access-kqrr2\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-xknzf\" (UID: \"d10dc8b2-044a-4829-b8bd-54559166b436\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-xknzf" Nov 25 10:16:24 crc kubenswrapper[4769]: I1125 10:16:24.941198 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d10dc8b2-044a-4829-b8bd-54559166b436-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-xknzf\" (UID: \"d10dc8b2-044a-4829-b8bd-54559166b436\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-xknzf" Nov 25 10:16:24 crc kubenswrapper[4769]: I1125 10:16:24.941389 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d10dc8b2-044a-4829-b8bd-54559166b436-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-xknzf\" (UID: \"d10dc8b2-044a-4829-b8bd-54559166b436\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-xknzf" Nov 25 10:16:24 crc kubenswrapper[4769]: I1125 10:16:24.948102 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d10dc8b2-044a-4829-b8bd-54559166b436-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-xknzf\" (UID: \"d10dc8b2-044a-4829-b8bd-54559166b436\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-xknzf" Nov 25 10:16:24 crc kubenswrapper[4769]: I1125 10:16:24.948119 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d10dc8b2-044a-4829-b8bd-54559166b436-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-xknzf\" (UID: \"d10dc8b2-044a-4829-b8bd-54559166b436\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-xknzf" Nov 25 10:16:24 crc kubenswrapper[4769]: I1125 10:16:24.960422 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kqrr2\" (UniqueName: \"kubernetes.io/projected/d10dc8b2-044a-4829-b8bd-54559166b436-kube-api-access-kqrr2\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-xknzf\" (UID: \"d10dc8b2-044a-4829-b8bd-54559166b436\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-xknzf" Nov 25 10:16:25 crc kubenswrapper[4769]: I1125 10:16:25.123355 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-xknzf" Nov 25 10:16:25 crc kubenswrapper[4769]: I1125 10:16:25.734669 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-xknzf"] Nov 25 10:16:25 crc kubenswrapper[4769]: I1125 10:16:25.741459 4769 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 10:16:26 crc kubenswrapper[4769]: I1125 10:16:26.723946 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-xknzf" event={"ID":"d10dc8b2-044a-4829-b8bd-54559166b436","Type":"ContainerStarted","Data":"db35d2e7ed86c54cf68af148986135e891da8832c5e0a16a222d57af15fb0b61"} Nov 25 10:16:26 crc kubenswrapper[4769]: I1125 10:16:26.724492 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-xknzf" event={"ID":"d10dc8b2-044a-4829-b8bd-54559166b436","Type":"ContainerStarted","Data":"73c7eb1ed18ce13c278e3b9c0bbb37a44e3f8258b7c12635165b7ddc268ab32e"} Nov 25 10:16:26 crc kubenswrapper[4769]: I1125 10:16:26.752879 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-xknzf" podStartSLOduration=2.250966427 podStartE2EDuration="2.752848662s" podCreationTimestamp="2025-11-25 10:16:24 +0000 UTC" firstStartedPulling="2025-11-25 10:16:25.74119523 +0000 UTC m=+1934.326167543" lastFinishedPulling="2025-11-25 10:16:26.243077465 +0000 UTC m=+1934.828049778" observedRunningTime="2025-11-25 10:16:26.740936182 +0000 UTC m=+1935.325908525" watchObservedRunningTime="2025-11-25 10:16:26.752848662 +0000 UTC m=+1935.337820995" Nov 25 10:16:31 crc kubenswrapper[4769]: I1125 10:16:31.037941 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-jrgf9"] Nov 25 10:16:31 crc kubenswrapper[4769]: I1125 10:16:31.048553 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-jrgf9"] Nov 25 10:16:32 crc kubenswrapper[4769]: I1125 10:16:32.248059 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5bb7cfa5-1e81-4d93-a15a-77260adec0d3" path="/var/lib/kubelet/pods/5bb7cfa5-1e81-4d93-a15a-77260adec0d3/volumes" Nov 25 10:16:45 crc kubenswrapper[4769]: I1125 10:16:45.071311 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-9zbdp"] Nov 25 10:16:45 crc kubenswrapper[4769]: I1125 10:16:45.089505 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-qkr9g"] Nov 25 10:16:45 crc kubenswrapper[4769]: I1125 10:16:45.111398 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-9zbdp"] Nov 25 10:16:45 crc kubenswrapper[4769]: I1125 10:16:45.132666 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-qkr9g"] Nov 25 10:16:45 crc kubenswrapper[4769]: I1125 10:16:45.214600 4769 scope.go:117] "RemoveContainer" containerID="b176b2b613fd303e61f4c5784dbcb2a06c08b8533cfc13538dd118b0c84b0a99" Nov 25 10:16:45 crc kubenswrapper[4769]: I1125 10:16:45.249484 4769 scope.go:117] "RemoveContainer" containerID="1d9188b083315ab023833cbc7f4522c645acd127b5322b4267af0310ee8e9240" Nov 25 10:16:45 crc kubenswrapper[4769]: I1125 10:16:45.318403 4769 scope.go:117] "RemoveContainer" containerID="096dec0bff515f769bec47c9874a06e3620cb9075a0534fbd7b6e776e814dcac" Nov 25 10:16:45 crc 
Nov 25 10:16:45 crc kubenswrapper[4769]: I1125 10:16:45.379031 4769 scope.go:117] "RemoveContainer" containerID="6cd2ed523228dd14223e9d41d61effb2954c846e1c8f67fc0edce305b18579d2"
Nov 25 10:16:45 crc kubenswrapper[4769]: I1125 10:16:45.442651 4769 scope.go:117] "RemoveContainer" containerID="b1f251e19bc2fedf163c481271d36edac389467986138a068d966dfff510ee10"
Nov 25 10:16:45 crc kubenswrapper[4769]: I1125 10:16:45.485851 4769 scope.go:117] "RemoveContainer" containerID="2c1bd92ce912803b300403769ebf27817171f025600603407568e2704132a5cd"
Nov 25 10:16:45 crc kubenswrapper[4769]: I1125 10:16:45.561930 4769 scope.go:117] "RemoveContainer" containerID="855770c5a65f55b54761d784a001d20212b2240c528cce318e0a2a72c055c7f1"
Nov 25 10:16:45 crc kubenswrapper[4769]: I1125 10:16:45.598588 4769 scope.go:117] "RemoveContainer" containerID="ec9b19989db202844e33f037416fd13d79f2e055b6bdd6f2e3bb8d99565e99a9"
Nov 25 10:16:45 crc kubenswrapper[4769]: I1125 10:16:45.631016 4769 scope.go:117] "RemoveContainer" containerID="d334b21beaaff8a289ce74d803cb34309fdca5b99bee7152a261357c95dedd6c"
Nov 25 10:16:46 crc kubenswrapper[4769]: I1125 10:16:46.046744 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-pvsgh"]
Nov 25 10:16:46 crc kubenswrapper[4769]: I1125 10:16:46.060241 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-pvsgh"]
Nov 25 10:16:46 crc kubenswrapper[4769]: I1125 10:16:46.272241 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="175271fe-6677-49b5-b497-c45ef1816fb7" path="/var/lib/kubelet/pods/175271fe-6677-49b5-b497-c45ef1816fb7/volumes"
Nov 25 10:16:46 crc kubenswrapper[4769]: I1125 10:16:46.275148 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="29edc9fc-ffe1-4511-9f89-9d0795f063cb" path="/var/lib/kubelet/pods/29edc9fc-ffe1-4511-9f89-9d0795f063cb/volumes"
Nov 25 10:16:46 crc kubenswrapper[4769]: I1125 10:16:46.277462 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee29c33a-80a8-4f26-81bd-1af50bbbaabb" path="/var/lib/kubelet/pods/ee29c33a-80a8-4f26-81bd-1af50bbbaabb/volumes"
Nov 25 10:17:03 crc kubenswrapper[4769]: I1125 10:17:03.047887 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-lfvzs"]
Nov 25 10:17:03 crc kubenswrapper[4769]: I1125 10:17:03.071444 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-lfvzs"]
Nov 25 10:17:04 crc kubenswrapper[4769]: I1125 10:17:04.259481 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ed79e867-002b-4591-b757-0410b73a43ef" path="/var/lib/kubelet/pods/ed79e867-002b-4591-b757-0410b73a43ef/volumes"
Nov 25 10:17:45 crc kubenswrapper[4769]: I1125 10:17:45.893823 4769 scope.go:117] "RemoveContainer" containerID="c7751374f75eda43b05b2eb51be43fe0c15ca757e586d1a21f51229c42e0f892"
Nov 25 10:17:45 crc kubenswrapper[4769]: I1125 10:17:45.946347 4769 scope.go:117] "RemoveContainer" containerID="09a8800fabbe0b8cff7aa77a649828ff9cb1df93447412766ac0f90f2cee23c2"
Nov 25 10:17:46 crc kubenswrapper[4769]: I1125 10:17:46.023707 4769 scope.go:117] "RemoveContainer" containerID="978d649392af26e7b658de6ba64a81ab9a59b5f0291633e52cc1e43cea2315f7"
Nov 25 10:17:46 crc kubenswrapper[4769]: I1125 10:17:46.082535 4769 scope.go:117] "RemoveContainer" containerID="41ab74e3a94e982b2db87464ee5df37c27ce86c7c00fbe475355b5d6a6e8078e"
Nov 25 10:17:52 crc kubenswrapper[4769]: I1125 10:17:52.290478 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 10:17:52 crc kubenswrapper[4769]: I1125 10:17:52.291307 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 10:18:06 crc kubenswrapper[4769]: I1125 10:18:06.076387 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-9s2mb"]
Nov 25 10:18:06 crc kubenswrapper[4769]: I1125 10:18:06.093102 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-bvfxk"]
Nov 25 10:18:06 crc kubenswrapper[4769]: I1125 10:18:06.106014 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-bvfxk"]
Nov 25 10:18:06 crc kubenswrapper[4769]: I1125 10:18:06.117474 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-9s2mb"]
Nov 25 10:18:06 crc kubenswrapper[4769]: I1125 10:18:06.254917 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="219e181e-2dbc-410e-aa4e-e9c843fb2aa9" path="/var/lib/kubelet/pods/219e181e-2dbc-410e-aa4e-e9c843fb2aa9/volumes"
Nov 25 10:18:06 crc kubenswrapper[4769]: I1125 10:18:06.256019 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="94da5c0c-3b95-4513-9fcb-6ee861294428" path="/var/lib/kubelet/pods/94da5c0c-3b95-4513-9fcb-6ee861294428/volumes"
Nov 25 10:18:08 crc kubenswrapper[4769]: I1125 10:18:08.045346 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-f2a2-account-create-q65xr"]
Nov 25 10:18:08 crc kubenswrapper[4769]: I1125 10:18:08.062039 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-72k77"]
Nov 25 10:18:08 crc kubenswrapper[4769]: I1125 10:18:08.070200 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-b9e5-account-create-8s7mq"]
Nov 25 10:18:08 crc kubenswrapper[4769]: I1125 10:18:08.107435 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-f2a2-account-create-q65xr"]
Nov 25 10:18:08 crc kubenswrapper[4769]: I1125 10:18:08.107522 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-8f09-account-create-gl77m"]
Nov 25 10:18:08 crc kubenswrapper[4769]: I1125 10:18:08.118146 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-8f09-account-create-gl77m"]
Nov 25 10:18:08 crc kubenswrapper[4769]: I1125 10:18:08.143436 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-72k77"]
Nov 25 10:18:08 crc kubenswrapper[4769]: I1125 10:18:08.151506 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-b9e5-account-create-8s7mq"]
Nov 25 10:18:08 crc kubenswrapper[4769]: I1125 10:18:08.251349 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="352ab718-d89c-430d-a7ca-a7ad712d63d6" path="/var/lib/kubelet/pods/352ab718-d89c-430d-a7ca-a7ad712d63d6/volumes"
Nov 25 10:18:08 crc kubenswrapper[4769]: I1125 10:18:08.252484 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a43c3555-7a5d-4824-a913-98e74014c48c" path="/var/lib/kubelet/pods/a43c3555-7a5d-4824-a913-98e74014c48c/volumes"
Nov 25 10:18:08 crc kubenswrapper[4769]: I1125 10:18:08.253072 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c5017f20-1e2f-44e6-9b68-f8b64ca42cc2" path="/var/lib/kubelet/pods/c5017f20-1e2f-44e6-9b68-f8b64ca42cc2/volumes"
Nov 25 10:18:08 crc kubenswrapper[4769]: I1125 10:18:08.253614 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e0335a31-5586-412d-b8fd-740763f60392" path="/var/lib/kubelet/pods/e0335a31-5586-412d-b8fd-740763f60392/volumes"
Nov 25 10:18:22 crc kubenswrapper[4769]: I1125 10:18:22.291058 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 10:18:22 crc kubenswrapper[4769]: I1125 10:18:22.292097 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 10:18:37 crc kubenswrapper[4769]: I1125 10:18:37.045246 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-q6p8q"]
Nov 25 10:18:37 crc kubenswrapper[4769]: I1125 10:18:37.055170 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-q6p8q"]
Nov 25 10:18:38 crc kubenswrapper[4769]: I1125 10:18:38.254094 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="69ff4eda-4f38-4778-9e92-ceb7005f8420" path="/var/lib/kubelet/pods/69ff4eda-4f38-4778-9e92-ceb7005f8420/volumes"
Nov 25 10:18:44 crc kubenswrapper[4769]: I1125 10:18:44.041511 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-create-scf89"]
Nov 25 10:18:44 crc kubenswrapper[4769]: I1125 10:18:44.056098 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-d76d-account-create-cqmjg"]
Nov 25 10:18:44 crc kubenswrapper[4769]: I1125 10:18:44.067509 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-create-scf89"]
Nov 25 10:18:44 crc kubenswrapper[4769]: I1125 10:18:44.077122 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-d76d-account-create-cqmjg"]
Nov 25 10:18:44 crc kubenswrapper[4769]: I1125 10:18:44.252289 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b1e6bf8-e126-48bf-9692-e38e6a3b9f2f" path="/var/lib/kubelet/pods/4b1e6bf8-e126-48bf-9692-e38e6a3b9f2f/volumes"
Nov 25 10:18:44 crc kubenswrapper[4769]: I1125 10:18:44.252885 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9817ed57-5bda-46f1-ad02-e2ac6e770543" path="/var/lib/kubelet/pods/9817ed57-5bda-46f1-ad02-e2ac6e770543/volumes"
Nov 25 10:18:46 crc kubenswrapper[4769]: I1125 10:18:46.254540 4769 scope.go:117] "RemoveContainer" containerID="6efa13f3c1f1ef376f8c9106ef8b49ede65c553462019c2a0107ae516c4f2a50"
Nov 25 10:18:46 crc kubenswrapper[4769]: I1125 10:18:46.301495 4769 scope.go:117] "RemoveContainer" containerID="79f38451ad711cb20cd3e2e39cda77b9320d32f1dcf74b914118ff6d66f2f6cc"
containerID="9fc04321d90f797532ec952fd5c985b529d666039ff5b01f6c7dcc7faa6b7748" Nov 25 10:18:46 crc kubenswrapper[4769]: I1125 10:18:46.417871 4769 scope.go:117] "RemoveContainer" containerID="b68fd3a8fa6244da0431b838ee009927068922f3562f350aac98010c51fc228d" Nov 25 10:18:46 crc kubenswrapper[4769]: I1125 10:18:46.476414 4769 scope.go:117] "RemoveContainer" containerID="33eff60b4e7d2f17d0fe5a2e17a8a4780d67a8be77cb9f89cad9453ef6b5d7c9" Nov 25 10:18:46 crc kubenswrapper[4769]: I1125 10:18:46.536623 4769 scope.go:117] "RemoveContainer" containerID="ebda9b29ca57389ef6aa287e331a81143559af722bc39a5aff969a40f690ee8b" Nov 25 10:18:46 crc kubenswrapper[4769]: I1125 10:18:46.594500 4769 scope.go:117] "RemoveContainer" containerID="d67cfa2ad39238530eb0c1a19050ada8df3c3d5bf120b837d572fc813c1fb668" Nov 25 10:18:46 crc kubenswrapper[4769]: I1125 10:18:46.631704 4769 scope.go:117] "RemoveContainer" containerID="a2ad5432078c4545b460768afec3bcf1b98236aeb7a0b03712a4a1f5a7eb3044" Nov 25 10:18:46 crc kubenswrapper[4769]: I1125 10:18:46.668696 4769 scope.go:117] "RemoveContainer" containerID="31015313ac4f19d93a6c724e068a1d06894410010be4d3cfefa48daf7947dab8" Nov 25 10:18:52 crc kubenswrapper[4769]: I1125 10:18:52.290603 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:18:52 crc kubenswrapper[4769]: I1125 10:18:52.291575 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:18:52 crc kubenswrapper[4769]: I1125 10:18:52.291636 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" Nov 25 10:18:52 crc kubenswrapper[4769]: I1125 10:18:52.292379 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9806b9991b42941f4f09cbc1245d89fa1181b893dd6fc482374becd2c41fa1bf"} pod="openshift-machine-config-operator/machine-config-daemon-98mzt" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 10:18:52 crc kubenswrapper[4769]: I1125 10:18:52.292445 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" containerID="cri-o://9806b9991b42941f4f09cbc1245d89fa1181b893dd6fc482374becd2c41fa1bf" gracePeriod=600 Nov 25 10:18:52 crc kubenswrapper[4769]: I1125 10:18:52.763225 4769 generic.go:334] "Generic (PLEG): container finished" podID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerID="9806b9991b42941f4f09cbc1245d89fa1181b893dd6fc482374becd2c41fa1bf" exitCode=0 Nov 25 10:18:52 crc kubenswrapper[4769]: I1125 10:18:52.763308 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" event={"ID":"d58c71b5-5dc4-45c1-9b58-9740a35d2256","Type":"ContainerDied","Data":"9806b9991b42941f4f09cbc1245d89fa1181b893dd6fc482374becd2c41fa1bf"} Nov 25 10:18:52 crc 
kubenswrapper[4769]: I1125 10:18:52.763770 4769 scope.go:117] "RemoveContainer" containerID="19d7e4719048ead4c6811e0bf09eaeb04f5d9c0b0e2c91bc113c457c1276b7a7" Nov 25 10:18:53 crc kubenswrapper[4769]: I1125 10:18:53.779095 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" event={"ID":"d58c71b5-5dc4-45c1-9b58-9740a35d2256","Type":"ContainerStarted","Data":"c0a1ea65b15cd591f5bac79049d2ab64564dd103d3a256253ad54595a631ef3b"} Nov 25 10:19:02 crc kubenswrapper[4769]: I1125 10:19:02.935000 4769 generic.go:334] "Generic (PLEG): container finished" podID="d10dc8b2-044a-4829-b8bd-54559166b436" containerID="db35d2e7ed86c54cf68af148986135e891da8832c5e0a16a222d57af15fb0b61" exitCode=0 Nov 25 10:19:02 crc kubenswrapper[4769]: I1125 10:19:02.935484 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-xknzf" event={"ID":"d10dc8b2-044a-4829-b8bd-54559166b436","Type":"ContainerDied","Data":"db35d2e7ed86c54cf68af148986135e891da8832c5e0a16a222d57af15fb0b61"} Nov 25 10:19:04 crc kubenswrapper[4769]: I1125 10:19:04.488228 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-xknzf" Nov 25 10:19:04 crc kubenswrapper[4769]: I1125 10:19:04.650715 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kqrr2\" (UniqueName: \"kubernetes.io/projected/d10dc8b2-044a-4829-b8bd-54559166b436-kube-api-access-kqrr2\") pod \"d10dc8b2-044a-4829-b8bd-54559166b436\" (UID: \"d10dc8b2-044a-4829-b8bd-54559166b436\") " Nov 25 10:19:04 crc kubenswrapper[4769]: I1125 10:19:04.650920 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d10dc8b2-044a-4829-b8bd-54559166b436-ssh-key\") pod \"d10dc8b2-044a-4829-b8bd-54559166b436\" (UID: \"d10dc8b2-044a-4829-b8bd-54559166b436\") " Nov 25 10:19:04 crc kubenswrapper[4769]: I1125 10:19:04.651215 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d10dc8b2-044a-4829-b8bd-54559166b436-inventory\") pod \"d10dc8b2-044a-4829-b8bd-54559166b436\" (UID: \"d10dc8b2-044a-4829-b8bd-54559166b436\") " Nov 25 10:19:04 crc kubenswrapper[4769]: I1125 10:19:04.658580 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d10dc8b2-044a-4829-b8bd-54559166b436-kube-api-access-kqrr2" (OuterVolumeSpecName: "kube-api-access-kqrr2") pod "d10dc8b2-044a-4829-b8bd-54559166b436" (UID: "d10dc8b2-044a-4829-b8bd-54559166b436"). InnerVolumeSpecName "kube-api-access-kqrr2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:19:04 crc kubenswrapper[4769]: I1125 10:19:04.681581 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d10dc8b2-044a-4829-b8bd-54559166b436-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "d10dc8b2-044a-4829-b8bd-54559166b436" (UID: "d10dc8b2-044a-4829-b8bd-54559166b436"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:19:04 crc kubenswrapper[4769]: I1125 10:19:04.708079 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d10dc8b2-044a-4829-b8bd-54559166b436-inventory" (OuterVolumeSpecName: "inventory") pod "d10dc8b2-044a-4829-b8bd-54559166b436" (UID: "d10dc8b2-044a-4829-b8bd-54559166b436"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:19:04 crc kubenswrapper[4769]: I1125 10:19:04.755860 4769 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d10dc8b2-044a-4829-b8bd-54559166b436-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:19:04 crc kubenswrapper[4769]: I1125 10:19:04.755945 4769 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d10dc8b2-044a-4829-b8bd-54559166b436-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 10:19:04 crc kubenswrapper[4769]: I1125 10:19:04.756001 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kqrr2\" (UniqueName: \"kubernetes.io/projected/d10dc8b2-044a-4829-b8bd-54559166b436-kube-api-access-kqrr2\") on node \"crc\" DevicePath \"\"" Nov 25 10:19:04 crc kubenswrapper[4769]: I1125 10:19:04.964856 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-xknzf" event={"ID":"d10dc8b2-044a-4829-b8bd-54559166b436","Type":"ContainerDied","Data":"73c7eb1ed18ce13c278e3b9c0bbb37a44e3f8258b7c12635165b7ddc268ab32e"} Nov 25 10:19:04 crc kubenswrapper[4769]: I1125 10:19:04.965140 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="73c7eb1ed18ce13c278e3b9c0bbb37a44e3f8258b7c12635165b7ddc268ab32e" Nov 25 10:19:04 crc kubenswrapper[4769]: I1125 10:19:04.965209 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-xknzf" Nov 25 10:19:05 crc kubenswrapper[4769]: I1125 10:19:05.160134 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-56djw"] Nov 25 10:19:05 crc kubenswrapper[4769]: E1125 10:19:05.161014 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d10dc8b2-044a-4829-b8bd-54559166b436" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Nov 25 10:19:05 crc kubenswrapper[4769]: I1125 10:19:05.161046 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="d10dc8b2-044a-4829-b8bd-54559166b436" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Nov 25 10:19:05 crc kubenswrapper[4769]: I1125 10:19:05.161456 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="d10dc8b2-044a-4829-b8bd-54559166b436" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Nov 25 10:19:05 crc kubenswrapper[4769]: I1125 10:19:05.162952 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-56djw" Nov 25 10:19:05 crc kubenswrapper[4769]: I1125 10:19:05.165857 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-skqtv" Nov 25 10:19:05 crc kubenswrapper[4769]: I1125 10:19:05.166730 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 10:19:05 crc kubenswrapper[4769]: I1125 10:19:05.166749 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 10:19:05 crc kubenswrapper[4769]: I1125 10:19:05.166926 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:19:05 crc kubenswrapper[4769]: I1125 10:19:05.169454 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-56djw"] Nov 25 10:19:05 crc kubenswrapper[4769]: I1125 10:19:05.267806 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ec573501-99b0-4833-828d-ac951684f714-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-56djw\" (UID: \"ec573501-99b0-4833-828d-ac951684f714\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-56djw" Nov 25 10:19:05 crc kubenswrapper[4769]: I1125 10:19:05.268127 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ec573501-99b0-4833-828d-ac951684f714-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-56djw\" (UID: \"ec573501-99b0-4833-828d-ac951684f714\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-56djw" Nov 25 10:19:05 crc kubenswrapper[4769]: I1125 10:19:05.268332 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7qdm9\" (UniqueName: \"kubernetes.io/projected/ec573501-99b0-4833-828d-ac951684f714-kube-api-access-7qdm9\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-56djw\" (UID: \"ec573501-99b0-4833-828d-ac951684f714\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-56djw" Nov 25 10:19:05 crc kubenswrapper[4769]: I1125 10:19:05.370481 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ec573501-99b0-4833-828d-ac951684f714-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-56djw\" (UID: \"ec573501-99b0-4833-828d-ac951684f714\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-56djw" Nov 25 10:19:05 crc kubenswrapper[4769]: I1125 10:19:05.370549 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7qdm9\" (UniqueName: \"kubernetes.io/projected/ec573501-99b0-4833-828d-ac951684f714-kube-api-access-7qdm9\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-56djw\" (UID: \"ec573501-99b0-4833-828d-ac951684f714\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-56djw" Nov 25 10:19:05 crc kubenswrapper[4769]: I1125 10:19:05.370639 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ec573501-99b0-4833-828d-ac951684f714-ssh-key\") 
pod \"configure-network-edpm-deployment-openstack-edpm-ipam-56djw\" (UID: \"ec573501-99b0-4833-828d-ac951684f714\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-56djw" Nov 25 10:19:05 crc kubenswrapper[4769]: I1125 10:19:05.376049 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ec573501-99b0-4833-828d-ac951684f714-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-56djw\" (UID: \"ec573501-99b0-4833-828d-ac951684f714\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-56djw" Nov 25 10:19:05 crc kubenswrapper[4769]: I1125 10:19:05.376150 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ec573501-99b0-4833-828d-ac951684f714-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-56djw\" (UID: \"ec573501-99b0-4833-828d-ac951684f714\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-56djw" Nov 25 10:19:05 crc kubenswrapper[4769]: I1125 10:19:05.387819 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7qdm9\" (UniqueName: \"kubernetes.io/projected/ec573501-99b0-4833-828d-ac951684f714-kube-api-access-7qdm9\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-56djw\" (UID: \"ec573501-99b0-4833-828d-ac951684f714\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-56djw" Nov 25 10:19:05 crc kubenswrapper[4769]: I1125 10:19:05.494800 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-56djw" Nov 25 10:19:05 crc kubenswrapper[4769]: I1125 10:19:05.724851 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-27wcg"] Nov 25 10:19:05 crc kubenswrapper[4769]: I1125 10:19:05.728202 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-27wcg" Nov 25 10:19:05 crc kubenswrapper[4769]: I1125 10:19:05.733764 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-27wcg"] Nov 25 10:19:05 crc kubenswrapper[4769]: I1125 10:19:05.884708 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c52c99c8-da92-47ee-adc5-ef6ab0d91f55-catalog-content\") pod \"community-operators-27wcg\" (UID: \"c52c99c8-da92-47ee-adc5-ef6ab0d91f55\") " pod="openshift-marketplace/community-operators-27wcg" Nov 25 10:19:05 crc kubenswrapper[4769]: I1125 10:19:05.884844 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bmvzr\" (UniqueName: \"kubernetes.io/projected/c52c99c8-da92-47ee-adc5-ef6ab0d91f55-kube-api-access-bmvzr\") pod \"community-operators-27wcg\" (UID: \"c52c99c8-da92-47ee-adc5-ef6ab0d91f55\") " pod="openshift-marketplace/community-operators-27wcg" Nov 25 10:19:05 crc kubenswrapper[4769]: I1125 10:19:05.884903 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c52c99c8-da92-47ee-adc5-ef6ab0d91f55-utilities\") pod \"community-operators-27wcg\" (UID: \"c52c99c8-da92-47ee-adc5-ef6ab0d91f55\") " pod="openshift-marketplace/community-operators-27wcg" Nov 25 10:19:05 crc kubenswrapper[4769]: I1125 10:19:05.987293 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c52c99c8-da92-47ee-adc5-ef6ab0d91f55-utilities\") pod \"community-operators-27wcg\" (UID: \"c52c99c8-da92-47ee-adc5-ef6ab0d91f55\") " pod="openshift-marketplace/community-operators-27wcg" Nov 25 10:19:05 crc kubenswrapper[4769]: I1125 10:19:05.987449 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c52c99c8-da92-47ee-adc5-ef6ab0d91f55-catalog-content\") pod \"community-operators-27wcg\" (UID: \"c52c99c8-da92-47ee-adc5-ef6ab0d91f55\") " pod="openshift-marketplace/community-operators-27wcg" Nov 25 10:19:05 crc kubenswrapper[4769]: I1125 10:19:05.987570 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bmvzr\" (UniqueName: \"kubernetes.io/projected/c52c99c8-da92-47ee-adc5-ef6ab0d91f55-kube-api-access-bmvzr\") pod \"community-operators-27wcg\" (UID: \"c52c99c8-da92-47ee-adc5-ef6ab0d91f55\") " pod="openshift-marketplace/community-operators-27wcg" Nov 25 10:19:05 crc kubenswrapper[4769]: I1125 10:19:05.988126 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c52c99c8-da92-47ee-adc5-ef6ab0d91f55-utilities\") pod \"community-operators-27wcg\" (UID: \"c52c99c8-da92-47ee-adc5-ef6ab0d91f55\") " pod="openshift-marketplace/community-operators-27wcg" Nov 25 10:19:05 crc kubenswrapper[4769]: I1125 10:19:05.988287 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c52c99c8-da92-47ee-adc5-ef6ab0d91f55-catalog-content\") pod \"community-operators-27wcg\" (UID: \"c52c99c8-da92-47ee-adc5-ef6ab0d91f55\") " pod="openshift-marketplace/community-operators-27wcg" Nov 25 10:19:06 crc kubenswrapper[4769]: I1125 10:19:06.012380 4769 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-bmvzr\" (UniqueName: \"kubernetes.io/projected/c52c99c8-da92-47ee-adc5-ef6ab0d91f55-kube-api-access-bmvzr\") pod \"community-operators-27wcg\" (UID: \"c52c99c8-da92-47ee-adc5-ef6ab0d91f55\") " pod="openshift-marketplace/community-operators-27wcg" Nov 25 10:19:06 crc kubenswrapper[4769]: I1125 10:19:06.056681 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-27wcg" Nov 25 10:19:06 crc kubenswrapper[4769]: I1125 10:19:06.133362 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-56djw"] Nov 25 10:19:06 crc kubenswrapper[4769]: W1125 10:19:06.152621 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podec573501_99b0_4833_828d_ac951684f714.slice/crio-6cb5f6f4f1355e1e91eb57cc00255b3dbf551759fded504c3d4732dcc3dd9c43 WatchSource:0}: Error finding container 6cb5f6f4f1355e1e91eb57cc00255b3dbf551759fded504c3d4732dcc3dd9c43: Status 404 returned error can't find the container with id 6cb5f6f4f1355e1e91eb57cc00255b3dbf551759fded504c3d4732dcc3dd9c43 Nov 25 10:19:06 crc kubenswrapper[4769]: I1125 10:19:06.637016 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-27wcg"] Nov 25 10:19:06 crc kubenswrapper[4769]: W1125 10:19:06.643303 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc52c99c8_da92_47ee_adc5_ef6ab0d91f55.slice/crio-5636bc3486ebd58c869de23f308bf83e7a2867a209e3e6b977192eb2f3657be8 WatchSource:0}: Error finding container 5636bc3486ebd58c869de23f308bf83e7a2867a209e3e6b977192eb2f3657be8: Status 404 returned error can't find the container with id 5636bc3486ebd58c869de23f308bf83e7a2867a209e3e6b977192eb2f3657be8 Nov 25 10:19:06 crc kubenswrapper[4769]: I1125 10:19:06.987749 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-56djw" event={"ID":"ec573501-99b0-4833-828d-ac951684f714","Type":"ContainerStarted","Data":"6cb5f6f4f1355e1e91eb57cc00255b3dbf551759fded504c3d4732dcc3dd9c43"} Nov 25 10:19:06 crc kubenswrapper[4769]: I1125 10:19:06.991833 4769 generic.go:334] "Generic (PLEG): container finished" podID="c52c99c8-da92-47ee-adc5-ef6ab0d91f55" containerID="a756ed8c63babfbe08f474a96f7ff6f23c1bfb6bf44e1966b2b3c285eacdd9d7" exitCode=0 Nov 25 10:19:06 crc kubenswrapper[4769]: I1125 10:19:06.991863 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-27wcg" event={"ID":"c52c99c8-da92-47ee-adc5-ef6ab0d91f55","Type":"ContainerDied","Data":"a756ed8c63babfbe08f474a96f7ff6f23c1bfb6bf44e1966b2b3c285eacdd9d7"} Nov 25 10:19:06 crc kubenswrapper[4769]: I1125 10:19:06.991880 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-27wcg" event={"ID":"c52c99c8-da92-47ee-adc5-ef6ab0d91f55","Type":"ContainerStarted","Data":"5636bc3486ebd58c869de23f308bf83e7a2867a209e3e6b977192eb2f3657be8"} Nov 25 10:19:08 crc kubenswrapper[4769]: I1125 10:19:08.018267 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-27wcg" event={"ID":"c52c99c8-da92-47ee-adc5-ef6ab0d91f55","Type":"ContainerStarted","Data":"ded6af5d5d4f7ea604b0d0bcd086e92644e400af98faf16d455e2655342576c0"} Nov 25 10:19:08 crc 
kubenswrapper[4769]: I1125 10:19:08.020583 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-56djw" event={"ID":"ec573501-99b0-4833-828d-ac951684f714","Type":"ContainerStarted","Data":"23279bc1529590d103cf9687588f54336803958563acfc239df73d573c7f7945"} Nov 25 10:19:08 crc kubenswrapper[4769]: I1125 10:19:08.090032 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-56djw" podStartSLOduration=2.4624008760000002 podStartE2EDuration="3.090009103s" podCreationTimestamp="2025-11-25 10:19:05 +0000 UTC" firstStartedPulling="2025-11-25 10:19:06.174800704 +0000 UTC m=+2094.759773017" lastFinishedPulling="2025-11-25 10:19:06.802408921 +0000 UTC m=+2095.387381244" observedRunningTime="2025-11-25 10:19:08.066633318 +0000 UTC m=+2096.651605631" watchObservedRunningTime="2025-11-25 10:19:08.090009103 +0000 UTC m=+2096.674981416" Nov 25 10:19:08 crc kubenswrapper[4769]: I1125 10:19:08.127868 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-4mxgh"] Nov 25 10:19:08 crc kubenswrapper[4769]: I1125 10:19:08.131452 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4mxgh" Nov 25 10:19:08 crc kubenswrapper[4769]: I1125 10:19:08.147200 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4mxgh"] Nov 25 10:19:08 crc kubenswrapper[4769]: I1125 10:19:08.155983 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ae20c30-36fb-41c3-91fa-09538080649e-catalog-content\") pod \"certified-operators-4mxgh\" (UID: \"8ae20c30-36fb-41c3-91fa-09538080649e\") " pod="openshift-marketplace/certified-operators-4mxgh" Nov 25 10:19:08 crc kubenswrapper[4769]: I1125 10:19:08.156421 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ae20c30-36fb-41c3-91fa-09538080649e-utilities\") pod \"certified-operators-4mxgh\" (UID: \"8ae20c30-36fb-41c3-91fa-09538080649e\") " pod="openshift-marketplace/certified-operators-4mxgh" Nov 25 10:19:08 crc kubenswrapper[4769]: I1125 10:19:08.156582 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fmd8n\" (UniqueName: \"kubernetes.io/projected/8ae20c30-36fb-41c3-91fa-09538080649e-kube-api-access-fmd8n\") pod \"certified-operators-4mxgh\" (UID: \"8ae20c30-36fb-41c3-91fa-09538080649e\") " pod="openshift-marketplace/certified-operators-4mxgh" Nov 25 10:19:08 crc kubenswrapper[4769]: I1125 10:19:08.259902 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ae20c30-36fb-41c3-91fa-09538080649e-utilities\") pod \"certified-operators-4mxgh\" (UID: \"8ae20c30-36fb-41c3-91fa-09538080649e\") " pod="openshift-marketplace/certified-operators-4mxgh" Nov 25 10:19:08 crc kubenswrapper[4769]: I1125 10:19:08.260043 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fmd8n\" (UniqueName: \"kubernetes.io/projected/8ae20c30-36fb-41c3-91fa-09538080649e-kube-api-access-fmd8n\") pod \"certified-operators-4mxgh\" (UID: \"8ae20c30-36fb-41c3-91fa-09538080649e\") " 
pod="openshift-marketplace/certified-operators-4mxgh" Nov 25 10:19:08 crc kubenswrapper[4769]: I1125 10:19:08.260344 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ae20c30-36fb-41c3-91fa-09538080649e-catalog-content\") pod \"certified-operators-4mxgh\" (UID: \"8ae20c30-36fb-41c3-91fa-09538080649e\") " pod="openshift-marketplace/certified-operators-4mxgh" Nov 25 10:19:08 crc kubenswrapper[4769]: I1125 10:19:08.260708 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ae20c30-36fb-41c3-91fa-09538080649e-utilities\") pod \"certified-operators-4mxgh\" (UID: \"8ae20c30-36fb-41c3-91fa-09538080649e\") " pod="openshift-marketplace/certified-operators-4mxgh" Nov 25 10:19:08 crc kubenswrapper[4769]: I1125 10:19:08.260955 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ae20c30-36fb-41c3-91fa-09538080649e-catalog-content\") pod \"certified-operators-4mxgh\" (UID: \"8ae20c30-36fb-41c3-91fa-09538080649e\") " pod="openshift-marketplace/certified-operators-4mxgh" Nov 25 10:19:08 crc kubenswrapper[4769]: I1125 10:19:08.285695 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fmd8n\" (UniqueName: \"kubernetes.io/projected/8ae20c30-36fb-41c3-91fa-09538080649e-kube-api-access-fmd8n\") pod \"certified-operators-4mxgh\" (UID: \"8ae20c30-36fb-41c3-91fa-09538080649e\") " pod="openshift-marketplace/certified-operators-4mxgh" Nov 25 10:19:08 crc kubenswrapper[4769]: I1125 10:19:08.472728 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4mxgh" Nov 25 10:19:09 crc kubenswrapper[4769]: I1125 10:19:09.045372 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4mxgh"] Nov 25 10:19:09 crc kubenswrapper[4769]: I1125 10:19:09.073267 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-w6f7r"] Nov 25 10:19:09 crc kubenswrapper[4769]: I1125 10:19:09.086511 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-w6f7r"] Nov 25 10:19:10 crc kubenswrapper[4769]: I1125 10:19:10.053235 4769 generic.go:334] "Generic (PLEG): container finished" podID="8ae20c30-36fb-41c3-91fa-09538080649e" containerID="e1cea740447eef223488929f10b0901f9db6a63407a60d76f800742dbea77d43" exitCode=0 Nov 25 10:19:10 crc kubenswrapper[4769]: I1125 10:19:10.053372 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4mxgh" event={"ID":"8ae20c30-36fb-41c3-91fa-09538080649e","Type":"ContainerDied","Data":"e1cea740447eef223488929f10b0901f9db6a63407a60d76f800742dbea77d43"} Nov 25 10:19:10 crc kubenswrapper[4769]: I1125 10:19:10.053599 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4mxgh" event={"ID":"8ae20c30-36fb-41c3-91fa-09538080649e","Type":"ContainerStarted","Data":"1315415e9a3254b92ff1280ca1a285ebd827ad52ee4c30bd0ee08ff27684e1c1"} Nov 25 10:19:10 crc kubenswrapper[4769]: I1125 10:19:10.251127 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd25577c-f800-4119-8e6f-20258030dfbc" path="/var/lib/kubelet/pods/bd25577c-f800-4119-8e6f-20258030dfbc/volumes" Nov 25 10:19:11 crc kubenswrapper[4769]: I1125 10:19:11.041451 4769 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-xsqz9"] Nov 25 10:19:11 crc kubenswrapper[4769]: I1125 10:19:11.052265 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-xsqz9"] Nov 25 10:19:11 crc kubenswrapper[4769]: I1125 10:19:11.067627 4769 generic.go:334] "Generic (PLEG): container finished" podID="c52c99c8-da92-47ee-adc5-ef6ab0d91f55" containerID="ded6af5d5d4f7ea604b0d0bcd086e92644e400af98faf16d455e2655342576c0" exitCode=0 Nov 25 10:19:11 crc kubenswrapper[4769]: I1125 10:19:11.067692 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-27wcg" event={"ID":"c52c99c8-da92-47ee-adc5-ef6ab0d91f55","Type":"ContainerDied","Data":"ded6af5d5d4f7ea604b0d0bcd086e92644e400af98faf16d455e2655342576c0"} Nov 25 10:19:12 crc kubenswrapper[4769]: I1125 10:19:12.080459 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-27wcg" event={"ID":"c52c99c8-da92-47ee-adc5-ef6ab0d91f55","Type":"ContainerStarted","Data":"b619cb2b8270342ea4d3fd2c68f41550b12bd2e7664a8cf8a4decba07f6cd5f7"} Nov 25 10:19:12 crc kubenswrapper[4769]: I1125 10:19:12.087011 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4mxgh" event={"ID":"8ae20c30-36fb-41c3-91fa-09538080649e","Type":"ContainerStarted","Data":"d2b4203b346cb38b92dbae6871b032ff61c476806fa50b4512ef9f1899b68973"} Nov 25 10:19:12 crc kubenswrapper[4769]: I1125 10:19:12.108283 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-27wcg" podStartSLOduration=2.610869197 podStartE2EDuration="7.108259275s" podCreationTimestamp="2025-11-25 10:19:05 +0000 UTC" firstStartedPulling="2025-11-25 10:19:06.993841362 +0000 UTC m=+2095.578813675" lastFinishedPulling="2025-11-25 10:19:11.49123139 +0000 UTC m=+2100.076203753" observedRunningTime="2025-11-25 10:19:12.099780508 +0000 UTC m=+2100.684752831" watchObservedRunningTime="2025-11-25 10:19:12.108259275 +0000 UTC m=+2100.693231588" Nov 25 10:19:12 crc kubenswrapper[4769]: I1125 10:19:12.266382 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8a7752ba-7e13-4f6c-af46-5df66ad668e3" path="/var/lib/kubelet/pods/8a7752ba-7e13-4f6c-af46-5df66ad668e3/volumes" Nov 25 10:19:13 crc kubenswrapper[4769]: I1125 10:19:13.098257 4769 generic.go:334] "Generic (PLEG): container finished" podID="8ae20c30-36fb-41c3-91fa-09538080649e" containerID="d2b4203b346cb38b92dbae6871b032ff61c476806fa50b4512ef9f1899b68973" exitCode=0 Nov 25 10:19:13 crc kubenswrapper[4769]: I1125 10:19:13.098411 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4mxgh" event={"ID":"8ae20c30-36fb-41c3-91fa-09538080649e","Type":"ContainerDied","Data":"d2b4203b346cb38b92dbae6871b032ff61c476806fa50b4512ef9f1899b68973"} Nov 25 10:19:14 crc kubenswrapper[4769]: I1125 10:19:14.113882 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4mxgh" event={"ID":"8ae20c30-36fb-41c3-91fa-09538080649e","Type":"ContainerStarted","Data":"ef2ceb15c3460444b73b9a0392326021d8504b2f60af62f2158a1770224ebac3"} Nov 25 10:19:14 crc kubenswrapper[4769]: I1125 10:19:14.148167 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-4mxgh" podStartSLOduration=2.467755904 podStartE2EDuration="6.148140138s" 
podCreationTimestamp="2025-11-25 10:19:08 +0000 UTC" firstStartedPulling="2025-11-25 10:19:10.05563398 +0000 UTC m=+2098.640606293" lastFinishedPulling="2025-11-25 10:19:13.736018214 +0000 UTC m=+2102.320990527" observedRunningTime="2025-11-25 10:19:14.134422471 +0000 UTC m=+2102.719394854" watchObservedRunningTime="2025-11-25 10:19:14.148140138 +0000 UTC m=+2102.733112481" Nov 25 10:19:16 crc kubenswrapper[4769]: I1125 10:19:16.057335 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-27wcg" Nov 25 10:19:16 crc kubenswrapper[4769]: I1125 10:19:16.057634 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-27wcg" Nov 25 10:19:17 crc kubenswrapper[4769]: I1125 10:19:17.146762 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-27wcg" podUID="c52c99c8-da92-47ee-adc5-ef6ab0d91f55" containerName="registry-server" probeResult="failure" output=< Nov 25 10:19:17 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 10:19:17 crc kubenswrapper[4769]: > Nov 25 10:19:18 crc kubenswrapper[4769]: I1125 10:19:18.474384 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-4mxgh" Nov 25 10:19:18 crc kubenswrapper[4769]: I1125 10:19:18.474702 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-4mxgh" Nov 25 10:19:18 crc kubenswrapper[4769]: I1125 10:19:18.546355 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-4mxgh" Nov 25 10:19:19 crc kubenswrapper[4769]: I1125 10:19:19.290023 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-4mxgh" Nov 25 10:19:19 crc kubenswrapper[4769]: I1125 10:19:19.347746 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4mxgh"] Nov 25 10:19:21 crc kubenswrapper[4769]: I1125 10:19:21.202352 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-4mxgh" podUID="8ae20c30-36fb-41c3-91fa-09538080649e" containerName="registry-server" containerID="cri-o://ef2ceb15c3460444b73b9a0392326021d8504b2f60af62f2158a1770224ebac3" gracePeriod=2 Nov 25 10:19:21 crc kubenswrapper[4769]: I1125 10:19:21.786181 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-4mxgh" Nov 25 10:19:21 crc kubenswrapper[4769]: I1125 10:19:21.961482 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fmd8n\" (UniqueName: \"kubernetes.io/projected/8ae20c30-36fb-41c3-91fa-09538080649e-kube-api-access-fmd8n\") pod \"8ae20c30-36fb-41c3-91fa-09538080649e\" (UID: \"8ae20c30-36fb-41c3-91fa-09538080649e\") " Nov 25 10:19:21 crc kubenswrapper[4769]: I1125 10:19:21.961546 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ae20c30-36fb-41c3-91fa-09538080649e-catalog-content\") pod \"8ae20c30-36fb-41c3-91fa-09538080649e\" (UID: \"8ae20c30-36fb-41c3-91fa-09538080649e\") " Nov 25 10:19:21 crc kubenswrapper[4769]: I1125 10:19:21.961764 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ae20c30-36fb-41c3-91fa-09538080649e-utilities\") pod \"8ae20c30-36fb-41c3-91fa-09538080649e\" (UID: \"8ae20c30-36fb-41c3-91fa-09538080649e\") " Nov 25 10:19:21 crc kubenswrapper[4769]: I1125 10:19:21.962861 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8ae20c30-36fb-41c3-91fa-09538080649e-utilities" (OuterVolumeSpecName: "utilities") pod "8ae20c30-36fb-41c3-91fa-09538080649e" (UID: "8ae20c30-36fb-41c3-91fa-09538080649e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:19:21 crc kubenswrapper[4769]: I1125 10:19:21.963495 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ae20c30-36fb-41c3-91fa-09538080649e-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:19:21 crc kubenswrapper[4769]: I1125 10:19:21.971751 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8ae20c30-36fb-41c3-91fa-09538080649e-kube-api-access-fmd8n" (OuterVolumeSpecName: "kube-api-access-fmd8n") pod "8ae20c30-36fb-41c3-91fa-09538080649e" (UID: "8ae20c30-36fb-41c3-91fa-09538080649e"). InnerVolumeSpecName "kube-api-access-fmd8n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:19:22 crc kubenswrapper[4769]: I1125 10:19:22.007322 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8ae20c30-36fb-41c3-91fa-09538080649e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8ae20c30-36fb-41c3-91fa-09538080649e" (UID: "8ae20c30-36fb-41c3-91fa-09538080649e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:19:22 crc kubenswrapper[4769]: I1125 10:19:22.066531 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fmd8n\" (UniqueName: \"kubernetes.io/projected/8ae20c30-36fb-41c3-91fa-09538080649e-kube-api-access-fmd8n\") on node \"crc\" DevicePath \"\"" Nov 25 10:19:22 crc kubenswrapper[4769]: I1125 10:19:22.066569 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ae20c30-36fb-41c3-91fa-09538080649e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:19:22 crc kubenswrapper[4769]: I1125 10:19:22.215271 4769 generic.go:334] "Generic (PLEG): container finished" podID="8ae20c30-36fb-41c3-91fa-09538080649e" containerID="ef2ceb15c3460444b73b9a0392326021d8504b2f60af62f2158a1770224ebac3" exitCode=0 Nov 25 10:19:22 crc kubenswrapper[4769]: I1125 10:19:22.215320 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4mxgh" event={"ID":"8ae20c30-36fb-41c3-91fa-09538080649e","Type":"ContainerDied","Data":"ef2ceb15c3460444b73b9a0392326021d8504b2f60af62f2158a1770224ebac3"} Nov 25 10:19:22 crc kubenswrapper[4769]: I1125 10:19:22.215357 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4mxgh" Nov 25 10:19:22 crc kubenswrapper[4769]: I1125 10:19:22.215374 4769 scope.go:117] "RemoveContainer" containerID="ef2ceb15c3460444b73b9a0392326021d8504b2f60af62f2158a1770224ebac3" Nov 25 10:19:22 crc kubenswrapper[4769]: I1125 10:19:22.215357 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4mxgh" event={"ID":"8ae20c30-36fb-41c3-91fa-09538080649e","Type":"ContainerDied","Data":"1315415e9a3254b92ff1280ca1a285ebd827ad52ee4c30bd0ee08ff27684e1c1"} Nov 25 10:19:22 crc kubenswrapper[4769]: I1125 10:19:22.246252 4769 scope.go:117] "RemoveContainer" containerID="d2b4203b346cb38b92dbae6871b032ff61c476806fa50b4512ef9f1899b68973" Nov 25 10:19:22 crc kubenswrapper[4769]: I1125 10:19:22.267272 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4mxgh"] Nov 25 10:19:22 crc kubenswrapper[4769]: I1125 10:19:22.282476 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-4mxgh"] Nov 25 10:19:22 crc kubenswrapper[4769]: I1125 10:19:22.287120 4769 scope.go:117] "RemoveContainer" containerID="e1cea740447eef223488929f10b0901f9db6a63407a60d76f800742dbea77d43" Nov 25 10:19:22 crc kubenswrapper[4769]: I1125 10:19:22.338781 4769 scope.go:117] "RemoveContainer" containerID="ef2ceb15c3460444b73b9a0392326021d8504b2f60af62f2158a1770224ebac3" Nov 25 10:19:22 crc kubenswrapper[4769]: E1125 10:19:22.339427 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ef2ceb15c3460444b73b9a0392326021d8504b2f60af62f2158a1770224ebac3\": container with ID starting with ef2ceb15c3460444b73b9a0392326021d8504b2f60af62f2158a1770224ebac3 not found: ID does not exist" containerID="ef2ceb15c3460444b73b9a0392326021d8504b2f60af62f2158a1770224ebac3" Nov 25 10:19:22 crc kubenswrapper[4769]: I1125 10:19:22.339524 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef2ceb15c3460444b73b9a0392326021d8504b2f60af62f2158a1770224ebac3"} err="failed to get container status 
\"ef2ceb15c3460444b73b9a0392326021d8504b2f60af62f2158a1770224ebac3\": rpc error: code = NotFound desc = could not find container \"ef2ceb15c3460444b73b9a0392326021d8504b2f60af62f2158a1770224ebac3\": container with ID starting with ef2ceb15c3460444b73b9a0392326021d8504b2f60af62f2158a1770224ebac3 not found: ID does not exist" Nov 25 10:19:22 crc kubenswrapper[4769]: I1125 10:19:22.339595 4769 scope.go:117] "RemoveContainer" containerID="d2b4203b346cb38b92dbae6871b032ff61c476806fa50b4512ef9f1899b68973" Nov 25 10:19:22 crc kubenswrapper[4769]: E1125 10:19:22.340273 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d2b4203b346cb38b92dbae6871b032ff61c476806fa50b4512ef9f1899b68973\": container with ID starting with d2b4203b346cb38b92dbae6871b032ff61c476806fa50b4512ef9f1899b68973 not found: ID does not exist" containerID="d2b4203b346cb38b92dbae6871b032ff61c476806fa50b4512ef9f1899b68973" Nov 25 10:19:22 crc kubenswrapper[4769]: I1125 10:19:22.340370 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d2b4203b346cb38b92dbae6871b032ff61c476806fa50b4512ef9f1899b68973"} err="failed to get container status \"d2b4203b346cb38b92dbae6871b032ff61c476806fa50b4512ef9f1899b68973\": rpc error: code = NotFound desc = could not find container \"d2b4203b346cb38b92dbae6871b032ff61c476806fa50b4512ef9f1899b68973\": container with ID starting with d2b4203b346cb38b92dbae6871b032ff61c476806fa50b4512ef9f1899b68973 not found: ID does not exist" Nov 25 10:19:22 crc kubenswrapper[4769]: I1125 10:19:22.340433 4769 scope.go:117] "RemoveContainer" containerID="e1cea740447eef223488929f10b0901f9db6a63407a60d76f800742dbea77d43" Nov 25 10:19:22 crc kubenswrapper[4769]: E1125 10:19:22.340926 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e1cea740447eef223488929f10b0901f9db6a63407a60d76f800742dbea77d43\": container with ID starting with e1cea740447eef223488929f10b0901f9db6a63407a60d76f800742dbea77d43 not found: ID does not exist" containerID="e1cea740447eef223488929f10b0901f9db6a63407a60d76f800742dbea77d43" Nov 25 10:19:22 crc kubenswrapper[4769]: I1125 10:19:22.341019 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e1cea740447eef223488929f10b0901f9db6a63407a60d76f800742dbea77d43"} err="failed to get container status \"e1cea740447eef223488929f10b0901f9db6a63407a60d76f800742dbea77d43\": rpc error: code = NotFound desc = could not find container \"e1cea740447eef223488929f10b0901f9db6a63407a60d76f800742dbea77d43\": container with ID starting with e1cea740447eef223488929f10b0901f9db6a63407a60d76f800742dbea77d43 not found: ID does not exist" Nov 25 10:19:24 crc kubenswrapper[4769]: I1125 10:19:24.255706 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8ae20c30-36fb-41c3-91fa-09538080649e" path="/var/lib/kubelet/pods/8ae20c30-36fb-41c3-91fa-09538080649e/volumes" Nov 25 10:19:26 crc kubenswrapper[4769]: I1125 10:19:26.128060 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-27wcg" Nov 25 10:19:26 crc kubenswrapper[4769]: I1125 10:19:26.218368 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-27wcg" Nov 25 10:19:26 crc kubenswrapper[4769]: I1125 10:19:26.383309 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-marketplace/community-operators-27wcg"] Nov 25 10:19:27 crc kubenswrapper[4769]: I1125 10:19:27.302595 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-27wcg" podUID="c52c99c8-da92-47ee-adc5-ef6ab0d91f55" containerName="registry-server" containerID="cri-o://b619cb2b8270342ea4d3fd2c68f41550b12bd2e7664a8cf8a4decba07f6cd5f7" gracePeriod=2 Nov 25 10:19:27 crc kubenswrapper[4769]: I1125 10:19:27.884852 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-27wcg" Nov 25 10:19:28 crc kubenswrapper[4769]: I1125 10:19:28.033543 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c52c99c8-da92-47ee-adc5-ef6ab0d91f55-utilities\") pod \"c52c99c8-da92-47ee-adc5-ef6ab0d91f55\" (UID: \"c52c99c8-da92-47ee-adc5-ef6ab0d91f55\") " Nov 25 10:19:28 crc kubenswrapper[4769]: I1125 10:19:28.033647 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bmvzr\" (UniqueName: \"kubernetes.io/projected/c52c99c8-da92-47ee-adc5-ef6ab0d91f55-kube-api-access-bmvzr\") pod \"c52c99c8-da92-47ee-adc5-ef6ab0d91f55\" (UID: \"c52c99c8-da92-47ee-adc5-ef6ab0d91f55\") " Nov 25 10:19:28 crc kubenswrapper[4769]: I1125 10:19:28.033887 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c52c99c8-da92-47ee-adc5-ef6ab0d91f55-catalog-content\") pod \"c52c99c8-da92-47ee-adc5-ef6ab0d91f55\" (UID: \"c52c99c8-da92-47ee-adc5-ef6ab0d91f55\") " Nov 25 10:19:28 crc kubenswrapper[4769]: I1125 10:19:28.035312 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c52c99c8-da92-47ee-adc5-ef6ab0d91f55-utilities" (OuterVolumeSpecName: "utilities") pod "c52c99c8-da92-47ee-adc5-ef6ab0d91f55" (UID: "c52c99c8-da92-47ee-adc5-ef6ab0d91f55"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:19:28 crc kubenswrapper[4769]: I1125 10:19:28.041140 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c52c99c8-da92-47ee-adc5-ef6ab0d91f55-kube-api-access-bmvzr" (OuterVolumeSpecName: "kube-api-access-bmvzr") pod "c52c99c8-da92-47ee-adc5-ef6ab0d91f55" (UID: "c52c99c8-da92-47ee-adc5-ef6ab0d91f55"). InnerVolumeSpecName "kube-api-access-bmvzr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:19:28 crc kubenswrapper[4769]: I1125 10:19:28.083094 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c52c99c8-da92-47ee-adc5-ef6ab0d91f55-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c52c99c8-da92-47ee-adc5-ef6ab0d91f55" (UID: "c52c99c8-da92-47ee-adc5-ef6ab0d91f55"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:19:28 crc kubenswrapper[4769]: I1125 10:19:28.136490 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c52c99c8-da92-47ee-adc5-ef6ab0d91f55-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:19:28 crc kubenswrapper[4769]: I1125 10:19:28.136529 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c52c99c8-da92-47ee-adc5-ef6ab0d91f55-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:19:28 crc kubenswrapper[4769]: I1125 10:19:28.136550 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bmvzr\" (UniqueName: \"kubernetes.io/projected/c52c99c8-da92-47ee-adc5-ef6ab0d91f55-kube-api-access-bmvzr\") on node \"crc\" DevicePath \"\"" Nov 25 10:19:28 crc kubenswrapper[4769]: I1125 10:19:28.324058 4769 generic.go:334] "Generic (PLEG): container finished" podID="c52c99c8-da92-47ee-adc5-ef6ab0d91f55" containerID="b619cb2b8270342ea4d3fd2c68f41550b12bd2e7664a8cf8a4decba07f6cd5f7" exitCode=0 Nov 25 10:19:28 crc kubenswrapper[4769]: I1125 10:19:28.324685 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-27wcg" event={"ID":"c52c99c8-da92-47ee-adc5-ef6ab0d91f55","Type":"ContainerDied","Data":"b619cb2b8270342ea4d3fd2c68f41550b12bd2e7664a8cf8a4decba07f6cd5f7"} Nov 25 10:19:28 crc kubenswrapper[4769]: I1125 10:19:28.324737 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-27wcg" event={"ID":"c52c99c8-da92-47ee-adc5-ef6ab0d91f55","Type":"ContainerDied","Data":"5636bc3486ebd58c869de23f308bf83e7a2867a209e3e6b977192eb2f3657be8"} Nov 25 10:19:28 crc kubenswrapper[4769]: I1125 10:19:28.324774 4769 scope.go:117] "RemoveContainer" containerID="b619cb2b8270342ea4d3fd2c68f41550b12bd2e7664a8cf8a4decba07f6cd5f7" Nov 25 10:19:28 crc kubenswrapper[4769]: I1125 10:19:28.325059 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-27wcg" Nov 25 10:19:28 crc kubenswrapper[4769]: I1125 10:19:28.369872 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-27wcg"] Nov 25 10:19:28 crc kubenswrapper[4769]: I1125 10:19:28.386282 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-27wcg"] Nov 25 10:19:28 crc kubenswrapper[4769]: I1125 10:19:28.391579 4769 scope.go:117] "RemoveContainer" containerID="ded6af5d5d4f7ea604b0d0bcd086e92644e400af98faf16d455e2655342576c0" Nov 25 10:19:28 crc kubenswrapper[4769]: I1125 10:19:28.457257 4769 scope.go:117] "RemoveContainer" containerID="a756ed8c63babfbe08f474a96f7ff6f23c1bfb6bf44e1966b2b3c285eacdd9d7" Nov 25 10:19:28 crc kubenswrapper[4769]: I1125 10:19:28.494336 4769 scope.go:117] "RemoveContainer" containerID="b619cb2b8270342ea4d3fd2c68f41550b12bd2e7664a8cf8a4decba07f6cd5f7" Nov 25 10:19:28 crc kubenswrapper[4769]: E1125 10:19:28.495950 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b619cb2b8270342ea4d3fd2c68f41550b12bd2e7664a8cf8a4decba07f6cd5f7\": container with ID starting with b619cb2b8270342ea4d3fd2c68f41550b12bd2e7664a8cf8a4decba07f6cd5f7 not found: ID does not exist" containerID="b619cb2b8270342ea4d3fd2c68f41550b12bd2e7664a8cf8a4decba07f6cd5f7" Nov 25 10:19:28 crc kubenswrapper[4769]: I1125 10:19:28.496071 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b619cb2b8270342ea4d3fd2c68f41550b12bd2e7664a8cf8a4decba07f6cd5f7"} err="failed to get container status \"b619cb2b8270342ea4d3fd2c68f41550b12bd2e7664a8cf8a4decba07f6cd5f7\": rpc error: code = NotFound desc = could not find container \"b619cb2b8270342ea4d3fd2c68f41550b12bd2e7664a8cf8a4decba07f6cd5f7\": container with ID starting with b619cb2b8270342ea4d3fd2c68f41550b12bd2e7664a8cf8a4decba07f6cd5f7 not found: ID does not exist" Nov 25 10:19:28 crc kubenswrapper[4769]: I1125 10:19:28.496104 4769 scope.go:117] "RemoveContainer" containerID="ded6af5d5d4f7ea604b0d0bcd086e92644e400af98faf16d455e2655342576c0" Nov 25 10:19:28 crc kubenswrapper[4769]: E1125 10:19:28.497567 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ded6af5d5d4f7ea604b0d0bcd086e92644e400af98faf16d455e2655342576c0\": container with ID starting with ded6af5d5d4f7ea604b0d0bcd086e92644e400af98faf16d455e2655342576c0 not found: ID does not exist" containerID="ded6af5d5d4f7ea604b0d0bcd086e92644e400af98faf16d455e2655342576c0" Nov 25 10:19:28 crc kubenswrapper[4769]: I1125 10:19:28.497596 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ded6af5d5d4f7ea604b0d0bcd086e92644e400af98faf16d455e2655342576c0"} err="failed to get container status \"ded6af5d5d4f7ea604b0d0bcd086e92644e400af98faf16d455e2655342576c0\": rpc error: code = NotFound desc = could not find container \"ded6af5d5d4f7ea604b0d0bcd086e92644e400af98faf16d455e2655342576c0\": container with ID starting with ded6af5d5d4f7ea604b0d0bcd086e92644e400af98faf16d455e2655342576c0 not found: ID does not exist" Nov 25 10:19:28 crc kubenswrapper[4769]: I1125 10:19:28.497613 4769 scope.go:117] "RemoveContainer" containerID="a756ed8c63babfbe08f474a96f7ff6f23c1bfb6bf44e1966b2b3c285eacdd9d7" Nov 25 10:19:28 crc kubenswrapper[4769]: E1125 10:19:28.498136 4769 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"a756ed8c63babfbe08f474a96f7ff6f23c1bfb6bf44e1966b2b3c285eacdd9d7\": container with ID starting with a756ed8c63babfbe08f474a96f7ff6f23c1bfb6bf44e1966b2b3c285eacdd9d7 not found: ID does not exist" containerID="a756ed8c63babfbe08f474a96f7ff6f23c1bfb6bf44e1966b2b3c285eacdd9d7" Nov 25 10:19:28 crc kubenswrapper[4769]: I1125 10:19:28.498162 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a756ed8c63babfbe08f474a96f7ff6f23c1bfb6bf44e1966b2b3c285eacdd9d7"} err="failed to get container status \"a756ed8c63babfbe08f474a96f7ff6f23c1bfb6bf44e1966b2b3c285eacdd9d7\": rpc error: code = NotFound desc = could not find container \"a756ed8c63babfbe08f474a96f7ff6f23c1bfb6bf44e1966b2b3c285eacdd9d7\": container with ID starting with a756ed8c63babfbe08f474a96f7ff6f23c1bfb6bf44e1966b2b3c285eacdd9d7 not found: ID does not exist" Nov 25 10:19:30 crc kubenswrapper[4769]: I1125 10:19:30.259280 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c52c99c8-da92-47ee-adc5-ef6ab0d91f55" path="/var/lib/kubelet/pods/c52c99c8-da92-47ee-adc5-ef6ab0d91f55/volumes" Nov 25 10:19:47 crc kubenswrapper[4769]: I1125 10:19:47.083828 4769 scope.go:117] "RemoveContainer" containerID="509a1cc9c9e190c94b3b3647dac50326f2abe9ea6fab6c5352c013fd3308fe47" Nov 25 10:19:47 crc kubenswrapper[4769]: I1125 10:19:47.135247 4769 scope.go:117] "RemoveContainer" containerID="4a020519aefc75cb906bbe26de0f7c58f76aab7f5eb5671179e31ca468fc1a6e" Nov 25 10:19:54 crc kubenswrapper[4769]: I1125 10:19:54.071010 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-qh9h6"] Nov 25 10:19:54 crc kubenswrapper[4769]: I1125 10:19:54.082148 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-qh9h6"] Nov 25 10:19:54 crc kubenswrapper[4769]: I1125 10:19:54.263901 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158" path="/var/lib/kubelet/pods/28b6e605-9cd8-4ab4-ab4c-7f9aeb7c1158/volumes" Nov 25 10:20:21 crc kubenswrapper[4769]: I1125 10:20:20.999709 4769 generic.go:334] "Generic (PLEG): container finished" podID="ec573501-99b0-4833-828d-ac951684f714" containerID="23279bc1529590d103cf9687588f54336803958563acfc239df73d573c7f7945" exitCode=0 Nov 25 10:20:21 crc kubenswrapper[4769]: I1125 10:20:20.999818 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-56djw" event={"ID":"ec573501-99b0-4833-828d-ac951684f714","Type":"ContainerDied","Data":"23279bc1529590d103cf9687588f54336803958563acfc239df73d573c7f7945"} Nov 25 10:20:22 crc kubenswrapper[4769]: I1125 10:20:22.590975 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-56djw" Nov 25 10:20:22 crc kubenswrapper[4769]: I1125 10:20:22.668740 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ec573501-99b0-4833-828d-ac951684f714-ssh-key\") pod \"ec573501-99b0-4833-828d-ac951684f714\" (UID: \"ec573501-99b0-4833-828d-ac951684f714\") " Nov 25 10:20:22 crc kubenswrapper[4769]: I1125 10:20:22.668797 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ec573501-99b0-4833-828d-ac951684f714-inventory\") pod \"ec573501-99b0-4833-828d-ac951684f714\" (UID: \"ec573501-99b0-4833-828d-ac951684f714\") " Nov 25 10:20:22 crc kubenswrapper[4769]: I1125 10:20:22.669135 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7qdm9\" (UniqueName: \"kubernetes.io/projected/ec573501-99b0-4833-828d-ac951684f714-kube-api-access-7qdm9\") pod \"ec573501-99b0-4833-828d-ac951684f714\" (UID: \"ec573501-99b0-4833-828d-ac951684f714\") " Nov 25 10:20:22 crc kubenswrapper[4769]: I1125 10:20:22.675897 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ec573501-99b0-4833-828d-ac951684f714-kube-api-access-7qdm9" (OuterVolumeSpecName: "kube-api-access-7qdm9") pod "ec573501-99b0-4833-828d-ac951684f714" (UID: "ec573501-99b0-4833-828d-ac951684f714"). InnerVolumeSpecName "kube-api-access-7qdm9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:20:22 crc kubenswrapper[4769]: I1125 10:20:22.711038 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec573501-99b0-4833-828d-ac951684f714-inventory" (OuterVolumeSpecName: "inventory") pod "ec573501-99b0-4833-828d-ac951684f714" (UID: "ec573501-99b0-4833-828d-ac951684f714"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:20:22 crc kubenswrapper[4769]: I1125 10:20:22.722163 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec573501-99b0-4833-828d-ac951684f714-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ec573501-99b0-4833-828d-ac951684f714" (UID: "ec573501-99b0-4833-828d-ac951684f714"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:20:22 crc kubenswrapper[4769]: I1125 10:20:22.772178 4769 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ec573501-99b0-4833-828d-ac951684f714-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:20:22 crc kubenswrapper[4769]: I1125 10:20:22.772572 4769 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ec573501-99b0-4833-828d-ac951684f714-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 10:20:22 crc kubenswrapper[4769]: I1125 10:20:22.772584 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7qdm9\" (UniqueName: \"kubernetes.io/projected/ec573501-99b0-4833-828d-ac951684f714-kube-api-access-7qdm9\") on node \"crc\" DevicePath \"\"" Nov 25 10:20:23 crc kubenswrapper[4769]: I1125 10:20:23.030434 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-56djw" event={"ID":"ec573501-99b0-4833-828d-ac951684f714","Type":"ContainerDied","Data":"6cb5f6f4f1355e1e91eb57cc00255b3dbf551759fded504c3d4732dcc3dd9c43"} Nov 25 10:20:23 crc kubenswrapper[4769]: I1125 10:20:23.030742 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6cb5f6f4f1355e1e91eb57cc00255b3dbf551759fded504c3d4732dcc3dd9c43" Nov 25 10:20:23 crc kubenswrapper[4769]: I1125 10:20:23.030505 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-56djw" Nov 25 10:20:23 crc kubenswrapper[4769]: I1125 10:20:23.146792 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c8rg2"] Nov 25 10:20:23 crc kubenswrapper[4769]: E1125 10:20:23.147352 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec573501-99b0-4833-828d-ac951684f714" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 25 10:20:23 crc kubenswrapper[4769]: I1125 10:20:23.147379 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec573501-99b0-4833-828d-ac951684f714" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 25 10:20:23 crc kubenswrapper[4769]: E1125 10:20:23.147402 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c52c99c8-da92-47ee-adc5-ef6ab0d91f55" containerName="registry-server" Nov 25 10:20:23 crc kubenswrapper[4769]: I1125 10:20:23.147410 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="c52c99c8-da92-47ee-adc5-ef6ab0d91f55" containerName="registry-server" Nov 25 10:20:23 crc kubenswrapper[4769]: E1125 10:20:23.147432 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ae20c30-36fb-41c3-91fa-09538080649e" containerName="extract-content" Nov 25 10:20:23 crc kubenswrapper[4769]: I1125 10:20:23.147440 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ae20c30-36fb-41c3-91fa-09538080649e" containerName="extract-content" Nov 25 10:20:23 crc kubenswrapper[4769]: E1125 10:20:23.147457 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c52c99c8-da92-47ee-adc5-ef6ab0d91f55" containerName="extract-utilities" Nov 25 10:20:23 crc kubenswrapper[4769]: I1125 10:20:23.147464 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="c52c99c8-da92-47ee-adc5-ef6ab0d91f55" containerName="extract-utilities" Nov 25 10:20:23 crc kubenswrapper[4769]: E1125 10:20:23.147493 4769 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ae20c30-36fb-41c3-91fa-09538080649e" containerName="extract-utilities" Nov 25 10:20:23 crc kubenswrapper[4769]: I1125 10:20:23.147503 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ae20c30-36fb-41c3-91fa-09538080649e" containerName="extract-utilities" Nov 25 10:20:23 crc kubenswrapper[4769]: E1125 10:20:23.147521 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c52c99c8-da92-47ee-adc5-ef6ab0d91f55" containerName="extract-content" Nov 25 10:20:23 crc kubenswrapper[4769]: I1125 10:20:23.147528 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="c52c99c8-da92-47ee-adc5-ef6ab0d91f55" containerName="extract-content" Nov 25 10:20:23 crc kubenswrapper[4769]: E1125 10:20:23.147544 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ae20c30-36fb-41c3-91fa-09538080649e" containerName="registry-server" Nov 25 10:20:23 crc kubenswrapper[4769]: I1125 10:20:23.147550 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ae20c30-36fb-41c3-91fa-09538080649e" containerName="registry-server" Nov 25 10:20:23 crc kubenswrapper[4769]: I1125 10:20:23.147814 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec573501-99b0-4833-828d-ac951684f714" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 25 10:20:23 crc kubenswrapper[4769]: I1125 10:20:23.147856 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ae20c30-36fb-41c3-91fa-09538080649e" containerName="registry-server" Nov 25 10:20:23 crc kubenswrapper[4769]: I1125 10:20:23.147874 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="c52c99c8-da92-47ee-adc5-ef6ab0d91f55" containerName="registry-server" Nov 25 10:20:23 crc kubenswrapper[4769]: I1125 10:20:23.148848 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c8rg2" Nov 25 10:20:23 crc kubenswrapper[4769]: I1125 10:20:23.156767 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 10:20:23 crc kubenswrapper[4769]: I1125 10:20:23.156769 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:20:23 crc kubenswrapper[4769]: I1125 10:20:23.157057 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-skqtv" Nov 25 10:20:23 crc kubenswrapper[4769]: I1125 10:20:23.157189 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 10:20:23 crc kubenswrapper[4769]: I1125 10:20:23.201018 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c8rg2"] Nov 25 10:20:23 crc kubenswrapper[4769]: I1125 10:20:23.289320 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5k9wx\" (UniqueName: \"kubernetes.io/projected/93dbc090-2167-43d9-bddd-d38e311a27ec-kube-api-access-5k9wx\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-c8rg2\" (UID: \"93dbc090-2167-43d9-bddd-d38e311a27ec\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c8rg2" Nov 25 10:20:23 crc kubenswrapper[4769]: I1125 10:20:23.289700 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/93dbc090-2167-43d9-bddd-d38e311a27ec-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-c8rg2\" (UID: \"93dbc090-2167-43d9-bddd-d38e311a27ec\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c8rg2" Nov 25 10:20:23 crc kubenswrapper[4769]: I1125 10:20:23.289772 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/93dbc090-2167-43d9-bddd-d38e311a27ec-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-c8rg2\" (UID: \"93dbc090-2167-43d9-bddd-d38e311a27ec\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c8rg2" Nov 25 10:20:23 crc kubenswrapper[4769]: I1125 10:20:23.392838 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5k9wx\" (UniqueName: \"kubernetes.io/projected/93dbc090-2167-43d9-bddd-d38e311a27ec-kube-api-access-5k9wx\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-c8rg2\" (UID: \"93dbc090-2167-43d9-bddd-d38e311a27ec\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c8rg2" Nov 25 10:20:23 crc kubenswrapper[4769]: I1125 10:20:23.392981 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/93dbc090-2167-43d9-bddd-d38e311a27ec-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-c8rg2\" (UID: \"93dbc090-2167-43d9-bddd-d38e311a27ec\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c8rg2" Nov 25 10:20:23 crc kubenswrapper[4769]: I1125 10:20:23.393009 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/93dbc090-2167-43d9-bddd-d38e311a27ec-inventory\") pod 
\"validate-network-edpm-deployment-openstack-edpm-ipam-c8rg2\" (UID: \"93dbc090-2167-43d9-bddd-d38e311a27ec\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c8rg2" Nov 25 10:20:23 crc kubenswrapper[4769]: I1125 10:20:23.402616 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/93dbc090-2167-43d9-bddd-d38e311a27ec-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-c8rg2\" (UID: \"93dbc090-2167-43d9-bddd-d38e311a27ec\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c8rg2" Nov 25 10:20:23 crc kubenswrapper[4769]: I1125 10:20:23.414534 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/93dbc090-2167-43d9-bddd-d38e311a27ec-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-c8rg2\" (UID: \"93dbc090-2167-43d9-bddd-d38e311a27ec\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c8rg2" Nov 25 10:20:23 crc kubenswrapper[4769]: I1125 10:20:23.436730 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5k9wx\" (UniqueName: \"kubernetes.io/projected/93dbc090-2167-43d9-bddd-d38e311a27ec-kube-api-access-5k9wx\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-c8rg2\" (UID: \"93dbc090-2167-43d9-bddd-d38e311a27ec\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c8rg2" Nov 25 10:20:23 crc kubenswrapper[4769]: I1125 10:20:23.491981 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c8rg2" Nov 25 10:20:24 crc kubenswrapper[4769]: I1125 10:20:24.131323 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c8rg2"] Nov 25 10:20:25 crc kubenswrapper[4769]: I1125 10:20:25.058074 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c8rg2" event={"ID":"93dbc090-2167-43d9-bddd-d38e311a27ec","Type":"ContainerStarted","Data":"e795033485fb58e544a40379e0e11ee9e540f166ac52114f0ad1dbd01b167479"} Nov 25 10:20:25 crc kubenswrapper[4769]: I1125 10:20:25.058768 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c8rg2" event={"ID":"93dbc090-2167-43d9-bddd-d38e311a27ec","Type":"ContainerStarted","Data":"250a32545f11c881dcd7d282441c8022c1d3a61905d3e83f34a09ef59560e9b9"} Nov 25 10:20:25 crc kubenswrapper[4769]: I1125 10:20:25.079166 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c8rg2" podStartSLOduration=1.621166922 podStartE2EDuration="2.079142261s" podCreationTimestamp="2025-11-25 10:20:23 +0000 UTC" firstStartedPulling="2025-11-25 10:20:24.124480996 +0000 UTC m=+2172.709453339" lastFinishedPulling="2025-11-25 10:20:24.582456365 +0000 UTC m=+2173.167428678" observedRunningTime="2025-11-25 10:20:25.073448348 +0000 UTC m=+2173.658420661" watchObservedRunningTime="2025-11-25 10:20:25.079142261 +0000 UTC m=+2173.664114574" Nov 25 10:20:31 crc kubenswrapper[4769]: I1125 10:20:31.167019 4769 generic.go:334] "Generic (PLEG): container finished" podID="93dbc090-2167-43d9-bddd-d38e311a27ec" containerID="e795033485fb58e544a40379e0e11ee9e540f166ac52114f0ad1dbd01b167479" exitCode=0 Nov 25 10:20:31 crc kubenswrapper[4769]: I1125 
10:20:31.167656 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c8rg2" event={"ID":"93dbc090-2167-43d9-bddd-d38e311a27ec","Type":"ContainerDied","Data":"e795033485fb58e544a40379e0e11ee9e540f166ac52114f0ad1dbd01b167479"} Nov 25 10:20:32 crc kubenswrapper[4769]: I1125 10:20:32.747217 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c8rg2" Nov 25 10:20:32 crc kubenswrapper[4769]: I1125 10:20:32.823313 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5k9wx\" (UniqueName: \"kubernetes.io/projected/93dbc090-2167-43d9-bddd-d38e311a27ec-kube-api-access-5k9wx\") pod \"93dbc090-2167-43d9-bddd-d38e311a27ec\" (UID: \"93dbc090-2167-43d9-bddd-d38e311a27ec\") " Nov 25 10:20:32 crc kubenswrapper[4769]: I1125 10:20:32.823409 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/93dbc090-2167-43d9-bddd-d38e311a27ec-inventory\") pod \"93dbc090-2167-43d9-bddd-d38e311a27ec\" (UID: \"93dbc090-2167-43d9-bddd-d38e311a27ec\") " Nov 25 10:20:32 crc kubenswrapper[4769]: I1125 10:20:32.823702 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/93dbc090-2167-43d9-bddd-d38e311a27ec-ssh-key\") pod \"93dbc090-2167-43d9-bddd-d38e311a27ec\" (UID: \"93dbc090-2167-43d9-bddd-d38e311a27ec\") " Nov 25 10:20:32 crc kubenswrapper[4769]: I1125 10:20:32.834242 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/93dbc090-2167-43d9-bddd-d38e311a27ec-kube-api-access-5k9wx" (OuterVolumeSpecName: "kube-api-access-5k9wx") pod "93dbc090-2167-43d9-bddd-d38e311a27ec" (UID: "93dbc090-2167-43d9-bddd-d38e311a27ec"). InnerVolumeSpecName "kube-api-access-5k9wx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:20:32 crc kubenswrapper[4769]: I1125 10:20:32.865929 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93dbc090-2167-43d9-bddd-d38e311a27ec-inventory" (OuterVolumeSpecName: "inventory") pod "93dbc090-2167-43d9-bddd-d38e311a27ec" (UID: "93dbc090-2167-43d9-bddd-d38e311a27ec"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:20:32 crc kubenswrapper[4769]: I1125 10:20:32.885183 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93dbc090-2167-43d9-bddd-d38e311a27ec-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "93dbc090-2167-43d9-bddd-d38e311a27ec" (UID: "93dbc090-2167-43d9-bddd-d38e311a27ec"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:20:32 crc kubenswrapper[4769]: I1125 10:20:32.927909 4769 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/93dbc090-2167-43d9-bddd-d38e311a27ec-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:20:32 crc kubenswrapper[4769]: I1125 10:20:32.927940 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5k9wx\" (UniqueName: \"kubernetes.io/projected/93dbc090-2167-43d9-bddd-d38e311a27ec-kube-api-access-5k9wx\") on node \"crc\" DevicePath \"\"" Nov 25 10:20:32 crc kubenswrapper[4769]: I1125 10:20:32.927952 4769 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/93dbc090-2167-43d9-bddd-d38e311a27ec-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 10:20:33 crc kubenswrapper[4769]: I1125 10:20:33.193828 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c8rg2" event={"ID":"93dbc090-2167-43d9-bddd-d38e311a27ec","Type":"ContainerDied","Data":"250a32545f11c881dcd7d282441c8022c1d3a61905d3e83f34a09ef59560e9b9"} Nov 25 10:20:33 crc kubenswrapper[4769]: I1125 10:20:33.194438 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="250a32545f11c881dcd7d282441c8022c1d3a61905d3e83f34a09ef59560e9b9" Nov 25 10:20:33 crc kubenswrapper[4769]: I1125 10:20:33.193978 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c8rg2" Nov 25 10:20:33 crc kubenswrapper[4769]: I1125 10:20:33.295109 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-m8dzd"] Nov 25 10:20:33 crc kubenswrapper[4769]: E1125 10:20:33.295768 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93dbc090-2167-43d9-bddd-d38e311a27ec" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 25 10:20:33 crc kubenswrapper[4769]: I1125 10:20:33.295802 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="93dbc090-2167-43d9-bddd-d38e311a27ec" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 25 10:20:33 crc kubenswrapper[4769]: I1125 10:20:33.296072 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="93dbc090-2167-43d9-bddd-d38e311a27ec" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 25 10:20:33 crc kubenswrapper[4769]: I1125 10:20:33.297080 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-m8dzd" Nov 25 10:20:33 crc kubenswrapper[4769]: I1125 10:20:33.300089 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-skqtv" Nov 25 10:20:33 crc kubenswrapper[4769]: I1125 10:20:33.301216 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 10:20:33 crc kubenswrapper[4769]: I1125 10:20:33.301569 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 10:20:33 crc kubenswrapper[4769]: I1125 10:20:33.305325 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:20:33 crc kubenswrapper[4769]: I1125 10:20:33.336113 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-m8dzd"] Nov 25 10:20:33 crc kubenswrapper[4769]: I1125 10:20:33.338520 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/78067229-e9ea-41be-ae04-26579218f6d1-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-m8dzd\" (UID: \"78067229-e9ea-41be-ae04-26579218f6d1\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-m8dzd" Nov 25 10:20:33 crc kubenswrapper[4769]: I1125 10:20:33.338718 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qgg9v\" (UniqueName: \"kubernetes.io/projected/78067229-e9ea-41be-ae04-26579218f6d1-kube-api-access-qgg9v\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-m8dzd\" (UID: \"78067229-e9ea-41be-ae04-26579218f6d1\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-m8dzd" Nov 25 10:20:33 crc kubenswrapper[4769]: I1125 10:20:33.338849 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/78067229-e9ea-41be-ae04-26579218f6d1-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-m8dzd\" (UID: \"78067229-e9ea-41be-ae04-26579218f6d1\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-m8dzd" Nov 25 10:20:33 crc kubenswrapper[4769]: I1125 10:20:33.442523 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qgg9v\" (UniqueName: \"kubernetes.io/projected/78067229-e9ea-41be-ae04-26579218f6d1-kube-api-access-qgg9v\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-m8dzd\" (UID: \"78067229-e9ea-41be-ae04-26579218f6d1\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-m8dzd" Nov 25 10:20:33 crc kubenswrapper[4769]: I1125 10:20:33.442670 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/78067229-e9ea-41be-ae04-26579218f6d1-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-m8dzd\" (UID: \"78067229-e9ea-41be-ae04-26579218f6d1\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-m8dzd" Nov 25 10:20:33 crc kubenswrapper[4769]: I1125 10:20:33.442752 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/78067229-e9ea-41be-ae04-26579218f6d1-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-m8dzd\" (UID: 
\"78067229-e9ea-41be-ae04-26579218f6d1\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-m8dzd" Nov 25 10:20:33 crc kubenswrapper[4769]: I1125 10:20:33.448067 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/78067229-e9ea-41be-ae04-26579218f6d1-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-m8dzd\" (UID: \"78067229-e9ea-41be-ae04-26579218f6d1\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-m8dzd" Nov 25 10:20:33 crc kubenswrapper[4769]: I1125 10:20:33.452235 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/78067229-e9ea-41be-ae04-26579218f6d1-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-m8dzd\" (UID: \"78067229-e9ea-41be-ae04-26579218f6d1\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-m8dzd" Nov 25 10:20:33 crc kubenswrapper[4769]: I1125 10:20:33.468276 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qgg9v\" (UniqueName: \"kubernetes.io/projected/78067229-e9ea-41be-ae04-26579218f6d1-kube-api-access-qgg9v\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-m8dzd\" (UID: \"78067229-e9ea-41be-ae04-26579218f6d1\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-m8dzd" Nov 25 10:20:33 crc kubenswrapper[4769]: I1125 10:20:33.619631 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-m8dzd" Nov 25 10:20:34 crc kubenswrapper[4769]: I1125 10:20:34.301933 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-m8dzd"] Nov 25 10:20:35 crc kubenswrapper[4769]: I1125 10:20:35.235520 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-m8dzd" event={"ID":"78067229-e9ea-41be-ae04-26579218f6d1","Type":"ContainerStarted","Data":"eb871ca1934d31de00b7e4cba6e803df318c34db40b08c42288c217729773b7d"} Nov 25 10:20:36 crc kubenswrapper[4769]: I1125 10:20:36.261392 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-m8dzd" event={"ID":"78067229-e9ea-41be-ae04-26579218f6d1","Type":"ContainerStarted","Data":"b6193c6570afe83b319f4e2cea6d2aa6fc0ef584d7fc491675afb33d2ab8c066"} Nov 25 10:20:36 crc kubenswrapper[4769]: I1125 10:20:36.283593 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-m8dzd" podStartSLOduration=2.545944682 podStartE2EDuration="3.283567811s" podCreationTimestamp="2025-11-25 10:20:33 +0000 UTC" firstStartedPulling="2025-11-25 10:20:34.309667923 +0000 UTC m=+2182.894640236" lastFinishedPulling="2025-11-25 10:20:35.047291052 +0000 UTC m=+2183.632263365" observedRunningTime="2025-11-25 10:20:36.275179757 +0000 UTC m=+2184.860152070" watchObservedRunningTime="2025-11-25 10:20:36.283567811 +0000 UTC m=+2184.868540124" Nov 25 10:20:47 crc kubenswrapper[4769]: I1125 10:20:47.321386 4769 scope.go:117] "RemoveContainer" containerID="b50fe91ee9b296b81fe73d33f1dd9caa29a3905ac6a627ba3d332b6c85ace7f0" Nov 25 10:21:13 crc kubenswrapper[4769]: I1125 10:21:13.736496 4769 generic.go:334] "Generic (PLEG): container finished" podID="78067229-e9ea-41be-ae04-26579218f6d1" containerID="b6193c6570afe83b319f4e2cea6d2aa6fc0ef584d7fc491675afb33d2ab8c066" exitCode=0 Nov 25 
10:21:13 crc kubenswrapper[4769]: I1125 10:21:13.736645 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-m8dzd" event={"ID":"78067229-e9ea-41be-ae04-26579218f6d1","Type":"ContainerDied","Data":"b6193c6570afe83b319f4e2cea6d2aa6fc0ef584d7fc491675afb33d2ab8c066"} Nov 25 10:21:15 crc kubenswrapper[4769]: I1125 10:21:15.273417 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-m8dzd" Nov 25 10:21:15 crc kubenswrapper[4769]: I1125 10:21:15.412812 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/78067229-e9ea-41be-ae04-26579218f6d1-ssh-key\") pod \"78067229-e9ea-41be-ae04-26579218f6d1\" (UID: \"78067229-e9ea-41be-ae04-26579218f6d1\") " Nov 25 10:21:15 crc kubenswrapper[4769]: I1125 10:21:15.412925 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/78067229-e9ea-41be-ae04-26579218f6d1-inventory\") pod \"78067229-e9ea-41be-ae04-26579218f6d1\" (UID: \"78067229-e9ea-41be-ae04-26579218f6d1\") " Nov 25 10:21:15 crc kubenswrapper[4769]: I1125 10:21:15.413151 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qgg9v\" (UniqueName: \"kubernetes.io/projected/78067229-e9ea-41be-ae04-26579218f6d1-kube-api-access-qgg9v\") pod \"78067229-e9ea-41be-ae04-26579218f6d1\" (UID: \"78067229-e9ea-41be-ae04-26579218f6d1\") " Nov 25 10:21:15 crc kubenswrapper[4769]: I1125 10:21:15.421098 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/78067229-e9ea-41be-ae04-26579218f6d1-kube-api-access-qgg9v" (OuterVolumeSpecName: "kube-api-access-qgg9v") pod "78067229-e9ea-41be-ae04-26579218f6d1" (UID: "78067229-e9ea-41be-ae04-26579218f6d1"). InnerVolumeSpecName "kube-api-access-qgg9v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:21:15 crc kubenswrapper[4769]: I1125 10:21:15.447647 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78067229-e9ea-41be-ae04-26579218f6d1-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "78067229-e9ea-41be-ae04-26579218f6d1" (UID: "78067229-e9ea-41be-ae04-26579218f6d1"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:21:15 crc kubenswrapper[4769]: I1125 10:21:15.461192 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78067229-e9ea-41be-ae04-26579218f6d1-inventory" (OuterVolumeSpecName: "inventory") pod "78067229-e9ea-41be-ae04-26579218f6d1" (UID: "78067229-e9ea-41be-ae04-26579218f6d1"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:21:15 crc kubenswrapper[4769]: I1125 10:21:15.517148 4769 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/78067229-e9ea-41be-ae04-26579218f6d1-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 10:21:15 crc kubenswrapper[4769]: I1125 10:21:15.517195 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qgg9v\" (UniqueName: \"kubernetes.io/projected/78067229-e9ea-41be-ae04-26579218f6d1-kube-api-access-qgg9v\") on node \"crc\" DevicePath \"\"" Nov 25 10:21:15 crc kubenswrapper[4769]: I1125 10:21:15.517208 4769 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/78067229-e9ea-41be-ae04-26579218f6d1-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:21:15 crc kubenswrapper[4769]: I1125 10:21:15.758370 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-m8dzd" event={"ID":"78067229-e9ea-41be-ae04-26579218f6d1","Type":"ContainerDied","Data":"eb871ca1934d31de00b7e4cba6e803df318c34db40b08c42288c217729773b7d"} Nov 25 10:21:15 crc kubenswrapper[4769]: I1125 10:21:15.758409 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eb871ca1934d31de00b7e4cba6e803df318c34db40b08c42288c217729773b7d" Nov 25 10:21:15 crc kubenswrapper[4769]: I1125 10:21:15.758459 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-m8dzd" Nov 25 10:21:15 crc kubenswrapper[4769]: I1125 10:21:15.891310 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-nwds5"] Nov 25 10:21:15 crc kubenswrapper[4769]: E1125 10:21:15.892360 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78067229-e9ea-41be-ae04-26579218f6d1" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 25 10:21:15 crc kubenswrapper[4769]: I1125 10:21:15.892397 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="78067229-e9ea-41be-ae04-26579218f6d1" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 25 10:21:15 crc kubenswrapper[4769]: I1125 10:21:15.892849 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="78067229-e9ea-41be-ae04-26579218f6d1" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 25 10:21:15 crc kubenswrapper[4769]: I1125 10:21:15.894386 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-nwds5" Nov 25 10:21:15 crc kubenswrapper[4769]: I1125 10:21:15.896762 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 10:21:15 crc kubenswrapper[4769]: I1125 10:21:15.897435 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 10:21:15 crc kubenswrapper[4769]: I1125 10:21:15.897853 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:21:15 crc kubenswrapper[4769]: I1125 10:21:15.898546 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-skqtv" Nov 25 10:21:15 crc kubenswrapper[4769]: I1125 10:21:15.913437 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-nwds5"] Nov 25 10:21:16 crc kubenswrapper[4769]: I1125 10:21:16.028143 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fcf2a670-6bb0-4fbd-bb3a-a681699fec4a-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-nwds5\" (UID: \"fcf2a670-6bb0-4fbd-bb3a-a681699fec4a\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-nwds5" Nov 25 10:21:16 crc kubenswrapper[4769]: I1125 10:21:16.028665 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jdptr\" (UniqueName: \"kubernetes.io/projected/fcf2a670-6bb0-4fbd-bb3a-a681699fec4a-kube-api-access-jdptr\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-nwds5\" (UID: \"fcf2a670-6bb0-4fbd-bb3a-a681699fec4a\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-nwds5" Nov 25 10:21:16 crc kubenswrapper[4769]: I1125 10:21:16.029189 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fcf2a670-6bb0-4fbd-bb3a-a681699fec4a-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-nwds5\" (UID: \"fcf2a670-6bb0-4fbd-bb3a-a681699fec4a\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-nwds5" Nov 25 10:21:16 crc kubenswrapper[4769]: I1125 10:21:16.132929 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fcf2a670-6bb0-4fbd-bb3a-a681699fec4a-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-nwds5\" (UID: \"fcf2a670-6bb0-4fbd-bb3a-a681699fec4a\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-nwds5" Nov 25 10:21:16 crc kubenswrapper[4769]: I1125 10:21:16.133088 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fcf2a670-6bb0-4fbd-bb3a-a681699fec4a-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-nwds5\" (UID: \"fcf2a670-6bb0-4fbd-bb3a-a681699fec4a\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-nwds5" Nov 25 10:21:16 crc kubenswrapper[4769]: I1125 10:21:16.133394 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jdptr\" (UniqueName: \"kubernetes.io/projected/fcf2a670-6bb0-4fbd-bb3a-a681699fec4a-kube-api-access-jdptr\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-nwds5\" 
(UID: \"fcf2a670-6bb0-4fbd-bb3a-a681699fec4a\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-nwds5" Nov 25 10:21:16 crc kubenswrapper[4769]: I1125 10:21:16.143329 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fcf2a670-6bb0-4fbd-bb3a-a681699fec4a-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-nwds5\" (UID: \"fcf2a670-6bb0-4fbd-bb3a-a681699fec4a\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-nwds5" Nov 25 10:21:16 crc kubenswrapper[4769]: I1125 10:21:16.156415 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fcf2a670-6bb0-4fbd-bb3a-a681699fec4a-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-nwds5\" (UID: \"fcf2a670-6bb0-4fbd-bb3a-a681699fec4a\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-nwds5" Nov 25 10:21:16 crc kubenswrapper[4769]: I1125 10:21:16.160197 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jdptr\" (UniqueName: \"kubernetes.io/projected/fcf2a670-6bb0-4fbd-bb3a-a681699fec4a-kube-api-access-jdptr\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-nwds5\" (UID: \"fcf2a670-6bb0-4fbd-bb3a-a681699fec4a\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-nwds5" Nov 25 10:21:16 crc kubenswrapper[4769]: I1125 10:21:16.223313 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-nwds5" Nov 25 10:21:16 crc kubenswrapper[4769]: I1125 10:21:16.883763 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-nwds5"] Nov 25 10:21:17 crc kubenswrapper[4769]: I1125 10:21:17.043690 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-sync-gvtk6"] Nov 25 10:21:17 crc kubenswrapper[4769]: I1125 10:21:17.054778 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-sync-gvtk6"] Nov 25 10:21:17 crc kubenswrapper[4769]: I1125 10:21:17.792202 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-nwds5" event={"ID":"fcf2a670-6bb0-4fbd-bb3a-a681699fec4a","Type":"ContainerStarted","Data":"3914c3048287ab9abace7e9ad2667caa51fd9183adc1031e87c1b33cfcffe048"} Nov 25 10:21:18 crc kubenswrapper[4769]: I1125 10:21:18.252130 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="495df49b-2ea1-4ee3-8d6d-5607d639d308" path="/var/lib/kubelet/pods/495df49b-2ea1-4ee3-8d6d-5607d639d308/volumes" Nov 25 10:21:18 crc kubenswrapper[4769]: I1125 10:21:18.806113 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-nwds5" event={"ID":"fcf2a670-6bb0-4fbd-bb3a-a681699fec4a","Type":"ContainerStarted","Data":"992bfa98416fd11b86f2b3050ebd3b0b5f3f79a3c2b43c2586e615b9c8bc32f3"} Nov 25 10:21:18 crc kubenswrapper[4769]: I1125 10:21:18.834136 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-nwds5" podStartSLOduration=2.828772219 podStartE2EDuration="3.834111257s" podCreationTimestamp="2025-11-25 10:21:15 +0000 UTC" firstStartedPulling="2025-11-25 10:21:16.891665274 +0000 UTC m=+2225.476637597" lastFinishedPulling="2025-11-25 10:21:17.897004312 +0000 UTC m=+2226.481976635" 
observedRunningTime="2025-11-25 10:21:18.826495994 +0000 UTC m=+2227.411468307" watchObservedRunningTime="2025-11-25 10:21:18.834111257 +0000 UTC m=+2227.419083580" Nov 25 10:21:22 crc kubenswrapper[4769]: I1125 10:21:22.290937 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:21:22 crc kubenswrapper[4769]: I1125 10:21:22.291352 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:21:47 crc kubenswrapper[4769]: I1125 10:21:47.471445 4769 scope.go:117] "RemoveContainer" containerID="c3b08b14e9174ed05cc087a52cc807bf556d230a368177e6296fa17122ebeca7" Nov 25 10:21:52 crc kubenswrapper[4769]: I1125 10:21:52.289996 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:21:52 crc kubenswrapper[4769]: I1125 10:21:52.290450 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:21:59 crc kubenswrapper[4769]: I1125 10:21:59.052145 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-sync-7dvlm"] Nov 25 10:21:59 crc kubenswrapper[4769]: I1125 10:21:59.063188 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-sync-7dvlm"] Nov 25 10:22:00 crc kubenswrapper[4769]: I1125 10:22:00.255377 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8738db83-202d-4c3c-bed9-2492a4a611c8" path="/var/lib/kubelet/pods/8738db83-202d-4c3c-bed9-2492a4a611c8/volumes" Nov 25 10:22:09 crc kubenswrapper[4769]: I1125 10:22:09.399175 4769 generic.go:334] "Generic (PLEG): container finished" podID="fcf2a670-6bb0-4fbd-bb3a-a681699fec4a" containerID="992bfa98416fd11b86f2b3050ebd3b0b5f3f79a3c2b43c2586e615b9c8bc32f3" exitCode=0 Nov 25 10:22:09 crc kubenswrapper[4769]: I1125 10:22:09.399275 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-nwds5" event={"ID":"fcf2a670-6bb0-4fbd-bb3a-a681699fec4a","Type":"ContainerDied","Data":"992bfa98416fd11b86f2b3050ebd3b0b5f3f79a3c2b43c2586e615b9c8bc32f3"} Nov 25 10:22:10 crc kubenswrapper[4769]: I1125 10:22:10.916594 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-nwds5" Nov 25 10:22:11 crc kubenswrapper[4769]: I1125 10:22:11.043191 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fcf2a670-6bb0-4fbd-bb3a-a681699fec4a-inventory\") pod \"fcf2a670-6bb0-4fbd-bb3a-a681699fec4a\" (UID: \"fcf2a670-6bb0-4fbd-bb3a-a681699fec4a\") " Nov 25 10:22:11 crc kubenswrapper[4769]: I1125 10:22:11.043348 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fcf2a670-6bb0-4fbd-bb3a-a681699fec4a-ssh-key\") pod \"fcf2a670-6bb0-4fbd-bb3a-a681699fec4a\" (UID: \"fcf2a670-6bb0-4fbd-bb3a-a681699fec4a\") " Nov 25 10:22:11 crc kubenswrapper[4769]: I1125 10:22:11.043481 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jdptr\" (UniqueName: \"kubernetes.io/projected/fcf2a670-6bb0-4fbd-bb3a-a681699fec4a-kube-api-access-jdptr\") pod \"fcf2a670-6bb0-4fbd-bb3a-a681699fec4a\" (UID: \"fcf2a670-6bb0-4fbd-bb3a-a681699fec4a\") " Nov 25 10:22:11 crc kubenswrapper[4769]: I1125 10:22:11.050713 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fcf2a670-6bb0-4fbd-bb3a-a681699fec4a-kube-api-access-jdptr" (OuterVolumeSpecName: "kube-api-access-jdptr") pod "fcf2a670-6bb0-4fbd-bb3a-a681699fec4a" (UID: "fcf2a670-6bb0-4fbd-bb3a-a681699fec4a"). InnerVolumeSpecName "kube-api-access-jdptr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:22:11 crc kubenswrapper[4769]: I1125 10:22:11.096231 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fcf2a670-6bb0-4fbd-bb3a-a681699fec4a-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "fcf2a670-6bb0-4fbd-bb3a-a681699fec4a" (UID: "fcf2a670-6bb0-4fbd-bb3a-a681699fec4a"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:22:11 crc kubenswrapper[4769]: I1125 10:22:11.099828 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fcf2a670-6bb0-4fbd-bb3a-a681699fec4a-inventory" (OuterVolumeSpecName: "inventory") pod "fcf2a670-6bb0-4fbd-bb3a-a681699fec4a" (UID: "fcf2a670-6bb0-4fbd-bb3a-a681699fec4a"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:22:11 crc kubenswrapper[4769]: I1125 10:22:11.146887 4769 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fcf2a670-6bb0-4fbd-bb3a-a681699fec4a-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 10:22:11 crc kubenswrapper[4769]: I1125 10:22:11.146934 4769 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fcf2a670-6bb0-4fbd-bb3a-a681699fec4a-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:22:11 crc kubenswrapper[4769]: I1125 10:22:11.146944 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jdptr\" (UniqueName: \"kubernetes.io/projected/fcf2a670-6bb0-4fbd-bb3a-a681699fec4a-kube-api-access-jdptr\") on node \"crc\" DevicePath \"\"" Nov 25 10:22:11 crc kubenswrapper[4769]: I1125 10:22:11.434675 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-nwds5" event={"ID":"fcf2a670-6bb0-4fbd-bb3a-a681699fec4a","Type":"ContainerDied","Data":"3914c3048287ab9abace7e9ad2667caa51fd9183adc1031e87c1b33cfcffe048"} Nov 25 10:22:11 crc kubenswrapper[4769]: I1125 10:22:11.434726 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3914c3048287ab9abace7e9ad2667caa51fd9183adc1031e87c1b33cfcffe048" Nov 25 10:22:11 crc kubenswrapper[4769]: I1125 10:22:11.434804 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-nwds5" Nov 25 10:22:11 crc kubenswrapper[4769]: I1125 10:22:11.548759 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-7tjkl"] Nov 25 10:22:11 crc kubenswrapper[4769]: E1125 10:22:11.549467 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fcf2a670-6bb0-4fbd-bb3a-a681699fec4a" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 25 10:22:11 crc kubenswrapper[4769]: I1125 10:22:11.549485 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="fcf2a670-6bb0-4fbd-bb3a-a681699fec4a" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 25 10:22:11 crc kubenswrapper[4769]: I1125 10:22:11.549760 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="fcf2a670-6bb0-4fbd-bb3a-a681699fec4a" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 25 10:22:11 crc kubenswrapper[4769]: I1125 10:22:11.550633 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-7tjkl" Nov 25 10:22:11 crc kubenswrapper[4769]: I1125 10:22:11.553334 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 10:22:11 crc kubenswrapper[4769]: I1125 10:22:11.554259 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 10:22:11 crc kubenswrapper[4769]: I1125 10:22:11.554261 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:22:11 crc kubenswrapper[4769]: I1125 10:22:11.555137 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-skqtv" Nov 25 10:22:11 crc kubenswrapper[4769]: I1125 10:22:11.571508 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-7tjkl"] Nov 25 10:22:11 crc kubenswrapper[4769]: I1125 10:22:11.663398 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/6dde7865-02ef-4557-948d-c5fb0ddf73ed-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-7tjkl\" (UID: \"6dde7865-02ef-4557-948d-c5fb0ddf73ed\") " pod="openstack/ssh-known-hosts-edpm-deployment-7tjkl" Nov 25 10:22:11 crc kubenswrapper[4769]: I1125 10:22:11.664496 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gqmvz\" (UniqueName: \"kubernetes.io/projected/6dde7865-02ef-4557-948d-c5fb0ddf73ed-kube-api-access-gqmvz\") pod \"ssh-known-hosts-edpm-deployment-7tjkl\" (UID: \"6dde7865-02ef-4557-948d-c5fb0ddf73ed\") " pod="openstack/ssh-known-hosts-edpm-deployment-7tjkl" Nov 25 10:22:11 crc kubenswrapper[4769]: I1125 10:22:11.664680 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/6dde7865-02ef-4557-948d-c5fb0ddf73ed-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-7tjkl\" (UID: \"6dde7865-02ef-4557-948d-c5fb0ddf73ed\") " pod="openstack/ssh-known-hosts-edpm-deployment-7tjkl" Nov 25 10:22:11 crc kubenswrapper[4769]: I1125 10:22:11.769205 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gqmvz\" (UniqueName: \"kubernetes.io/projected/6dde7865-02ef-4557-948d-c5fb0ddf73ed-kube-api-access-gqmvz\") pod \"ssh-known-hosts-edpm-deployment-7tjkl\" (UID: \"6dde7865-02ef-4557-948d-c5fb0ddf73ed\") " pod="openstack/ssh-known-hosts-edpm-deployment-7tjkl" Nov 25 10:22:11 crc kubenswrapper[4769]: I1125 10:22:11.769427 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/6dde7865-02ef-4557-948d-c5fb0ddf73ed-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-7tjkl\" (UID: \"6dde7865-02ef-4557-948d-c5fb0ddf73ed\") " pod="openstack/ssh-known-hosts-edpm-deployment-7tjkl" Nov 25 10:22:11 crc kubenswrapper[4769]: I1125 10:22:11.769686 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/6dde7865-02ef-4557-948d-c5fb0ddf73ed-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-7tjkl\" (UID: \"6dde7865-02ef-4557-948d-c5fb0ddf73ed\") " pod="openstack/ssh-known-hosts-edpm-deployment-7tjkl" Nov 25 10:22:11 crc 
kubenswrapper[4769]: I1125 10:22:11.782942 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/6dde7865-02ef-4557-948d-c5fb0ddf73ed-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-7tjkl\" (UID: \"6dde7865-02ef-4557-948d-c5fb0ddf73ed\") " pod="openstack/ssh-known-hosts-edpm-deployment-7tjkl" Nov 25 10:22:11 crc kubenswrapper[4769]: I1125 10:22:11.784101 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/6dde7865-02ef-4557-948d-c5fb0ddf73ed-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-7tjkl\" (UID: \"6dde7865-02ef-4557-948d-c5fb0ddf73ed\") " pod="openstack/ssh-known-hosts-edpm-deployment-7tjkl" Nov 25 10:22:11 crc kubenswrapper[4769]: I1125 10:22:11.797089 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gqmvz\" (UniqueName: \"kubernetes.io/projected/6dde7865-02ef-4557-948d-c5fb0ddf73ed-kube-api-access-gqmvz\") pod \"ssh-known-hosts-edpm-deployment-7tjkl\" (UID: \"6dde7865-02ef-4557-948d-c5fb0ddf73ed\") " pod="openstack/ssh-known-hosts-edpm-deployment-7tjkl" Nov 25 10:22:11 crc kubenswrapper[4769]: I1125 10:22:11.876695 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-7tjkl" Nov 25 10:22:12 crc kubenswrapper[4769]: I1125 10:22:12.478937 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-7tjkl"] Nov 25 10:22:12 crc kubenswrapper[4769]: W1125 10:22:12.487593 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6dde7865_02ef_4557_948d_c5fb0ddf73ed.slice/crio-7d74b3e15ae859fc19fb707306f622823aeea61781f87c15524dd89c2700b487 WatchSource:0}: Error finding container 7d74b3e15ae859fc19fb707306f622823aeea61781f87c15524dd89c2700b487: Status 404 returned error can't find the container with id 7d74b3e15ae859fc19fb707306f622823aeea61781f87c15524dd89c2700b487 Nov 25 10:22:12 crc kubenswrapper[4769]: I1125 10:22:12.493614 4769 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 10:22:13 crc kubenswrapper[4769]: I1125 10:22:13.182208 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:22:13 crc kubenswrapper[4769]: I1125 10:22:13.475620 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-7tjkl" event={"ID":"6dde7865-02ef-4557-948d-c5fb0ddf73ed","Type":"ContainerStarted","Data":"7d74b3e15ae859fc19fb707306f622823aeea61781f87c15524dd89c2700b487"} Nov 25 10:22:14 crc kubenswrapper[4769]: I1125 10:22:14.486527 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-7tjkl" event={"ID":"6dde7865-02ef-4557-948d-c5fb0ddf73ed","Type":"ContainerStarted","Data":"5825162d5d13908275b12503a9b47bdedc5f0471836318d7ad0c1033b4b96fc0"} Nov 25 10:22:14 crc kubenswrapper[4769]: I1125 10:22:14.513203 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-7tjkl" podStartSLOduration=2.827854559 podStartE2EDuration="3.513177754s" podCreationTimestamp="2025-11-25 10:22:11 +0000 UTC" firstStartedPulling="2025-11-25 10:22:12.493371545 +0000 UTC m=+2281.078343858" lastFinishedPulling="2025-11-25 10:22:13.17869474 +0000 UTC 
m=+2281.763667053" observedRunningTime="2025-11-25 10:22:14.50550655 +0000 UTC m=+2283.090478863" watchObservedRunningTime="2025-11-25 10:22:14.513177754 +0000 UTC m=+2283.098150067" Nov 25 10:22:17 crc kubenswrapper[4769]: I1125 10:22:17.777736 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-7q7bz"] Nov 25 10:22:17 crc kubenswrapper[4769]: I1125 10:22:17.781144 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7q7bz" Nov 25 10:22:17 crc kubenswrapper[4769]: I1125 10:22:17.786145 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66bf18d5-d135-4e7d-8dde-62a8988b46c6-utilities\") pod \"redhat-operators-7q7bz\" (UID: \"66bf18d5-d135-4e7d-8dde-62a8988b46c6\") " pod="openshift-marketplace/redhat-operators-7q7bz" Nov 25 10:22:17 crc kubenswrapper[4769]: I1125 10:22:17.786720 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66bf18d5-d135-4e7d-8dde-62a8988b46c6-catalog-content\") pod \"redhat-operators-7q7bz\" (UID: \"66bf18d5-d135-4e7d-8dde-62a8988b46c6\") " pod="openshift-marketplace/redhat-operators-7q7bz" Nov 25 10:22:17 crc kubenswrapper[4769]: I1125 10:22:17.786887 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j6wtm\" (UniqueName: \"kubernetes.io/projected/66bf18d5-d135-4e7d-8dde-62a8988b46c6-kube-api-access-j6wtm\") pod \"redhat-operators-7q7bz\" (UID: \"66bf18d5-d135-4e7d-8dde-62a8988b46c6\") " pod="openshift-marketplace/redhat-operators-7q7bz" Nov 25 10:22:17 crc kubenswrapper[4769]: I1125 10:22:17.803304 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7q7bz"] Nov 25 10:22:17 crc kubenswrapper[4769]: I1125 10:22:17.888939 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66bf18d5-d135-4e7d-8dde-62a8988b46c6-utilities\") pod \"redhat-operators-7q7bz\" (UID: \"66bf18d5-d135-4e7d-8dde-62a8988b46c6\") " pod="openshift-marketplace/redhat-operators-7q7bz" Nov 25 10:22:17 crc kubenswrapper[4769]: I1125 10:22:17.889125 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66bf18d5-d135-4e7d-8dde-62a8988b46c6-catalog-content\") pod \"redhat-operators-7q7bz\" (UID: \"66bf18d5-d135-4e7d-8dde-62a8988b46c6\") " pod="openshift-marketplace/redhat-operators-7q7bz" Nov 25 10:22:17 crc kubenswrapper[4769]: I1125 10:22:17.889182 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j6wtm\" (UniqueName: \"kubernetes.io/projected/66bf18d5-d135-4e7d-8dde-62a8988b46c6-kube-api-access-j6wtm\") pod \"redhat-operators-7q7bz\" (UID: \"66bf18d5-d135-4e7d-8dde-62a8988b46c6\") " pod="openshift-marketplace/redhat-operators-7q7bz" Nov 25 10:22:17 crc kubenswrapper[4769]: I1125 10:22:17.889558 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66bf18d5-d135-4e7d-8dde-62a8988b46c6-utilities\") pod \"redhat-operators-7q7bz\" (UID: \"66bf18d5-d135-4e7d-8dde-62a8988b46c6\") " pod="openshift-marketplace/redhat-operators-7q7bz" Nov 25 10:22:17 crc kubenswrapper[4769]: I1125 10:22:17.889786 4769 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66bf18d5-d135-4e7d-8dde-62a8988b46c6-catalog-content\") pod \"redhat-operators-7q7bz\" (UID: \"66bf18d5-d135-4e7d-8dde-62a8988b46c6\") " pod="openshift-marketplace/redhat-operators-7q7bz" Nov 25 10:22:17 crc kubenswrapper[4769]: I1125 10:22:17.921608 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j6wtm\" (UniqueName: \"kubernetes.io/projected/66bf18d5-d135-4e7d-8dde-62a8988b46c6-kube-api-access-j6wtm\") pod \"redhat-operators-7q7bz\" (UID: \"66bf18d5-d135-4e7d-8dde-62a8988b46c6\") " pod="openshift-marketplace/redhat-operators-7q7bz" Nov 25 10:22:18 crc kubenswrapper[4769]: I1125 10:22:18.121281 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7q7bz" Nov 25 10:22:18 crc kubenswrapper[4769]: I1125 10:22:18.640948 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7q7bz"] Nov 25 10:22:19 crc kubenswrapper[4769]: I1125 10:22:19.566467 4769 generic.go:334] "Generic (PLEG): container finished" podID="66bf18d5-d135-4e7d-8dde-62a8988b46c6" containerID="54c02b08570aa8e84ea671a38a1cf1825a7ff8aae976a358e5bcc446d534e06a" exitCode=0 Nov 25 10:22:19 crc kubenswrapper[4769]: I1125 10:22:19.566527 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7q7bz" event={"ID":"66bf18d5-d135-4e7d-8dde-62a8988b46c6","Type":"ContainerDied","Data":"54c02b08570aa8e84ea671a38a1cf1825a7ff8aae976a358e5bcc446d534e06a"} Nov 25 10:22:19 crc kubenswrapper[4769]: I1125 10:22:19.566782 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7q7bz" event={"ID":"66bf18d5-d135-4e7d-8dde-62a8988b46c6","Type":"ContainerStarted","Data":"73645827e43c786b148b0091a02b4b1549ff076e85af6b22bcb7ada86ee8a8f2"} Nov 25 10:22:20 crc kubenswrapper[4769]: I1125 10:22:20.586813 4769 generic.go:334] "Generic (PLEG): container finished" podID="6dde7865-02ef-4557-948d-c5fb0ddf73ed" containerID="5825162d5d13908275b12503a9b47bdedc5f0471836318d7ad0c1033b4b96fc0" exitCode=0 Nov 25 10:22:20 crc kubenswrapper[4769]: I1125 10:22:20.586906 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-7tjkl" event={"ID":"6dde7865-02ef-4557-948d-c5fb0ddf73ed","Type":"ContainerDied","Data":"5825162d5d13908275b12503a9b47bdedc5f0471836318d7ad0c1033b4b96fc0"} Nov 25 10:22:21 crc kubenswrapper[4769]: I1125 10:22:21.609751 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7q7bz" event={"ID":"66bf18d5-d135-4e7d-8dde-62a8988b46c6","Type":"ContainerStarted","Data":"ba8d8850d77a436165a83868d9b2e09a8d54aca842e4c69e71ea252258e55f76"} Nov 25 10:22:22 crc kubenswrapper[4769]: I1125 10:22:22.156440 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-7tjkl" Nov 25 10:22:22 crc kubenswrapper[4769]: I1125 10:22:22.210428 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gqmvz\" (UniqueName: \"kubernetes.io/projected/6dde7865-02ef-4557-948d-c5fb0ddf73ed-kube-api-access-gqmvz\") pod \"6dde7865-02ef-4557-948d-c5fb0ddf73ed\" (UID: \"6dde7865-02ef-4557-948d-c5fb0ddf73ed\") " Nov 25 10:22:22 crc kubenswrapper[4769]: I1125 10:22:22.210531 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/6dde7865-02ef-4557-948d-c5fb0ddf73ed-inventory-0\") pod \"6dde7865-02ef-4557-948d-c5fb0ddf73ed\" (UID: \"6dde7865-02ef-4557-948d-c5fb0ddf73ed\") " Nov 25 10:22:22 crc kubenswrapper[4769]: I1125 10:22:22.210669 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/6dde7865-02ef-4557-948d-c5fb0ddf73ed-ssh-key-openstack-edpm-ipam\") pod \"6dde7865-02ef-4557-948d-c5fb0ddf73ed\" (UID: \"6dde7865-02ef-4557-948d-c5fb0ddf73ed\") " Nov 25 10:22:22 crc kubenswrapper[4769]: I1125 10:22:22.218848 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6dde7865-02ef-4557-948d-c5fb0ddf73ed-kube-api-access-gqmvz" (OuterVolumeSpecName: "kube-api-access-gqmvz") pod "6dde7865-02ef-4557-948d-c5fb0ddf73ed" (UID: "6dde7865-02ef-4557-948d-c5fb0ddf73ed"). InnerVolumeSpecName "kube-api-access-gqmvz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:22:22 crc kubenswrapper[4769]: I1125 10:22:22.247765 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6dde7865-02ef-4557-948d-c5fb0ddf73ed-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "6dde7865-02ef-4557-948d-c5fb0ddf73ed" (UID: "6dde7865-02ef-4557-948d-c5fb0ddf73ed"). InnerVolumeSpecName "inventory-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:22:22 crc kubenswrapper[4769]: I1125 10:22:22.254898 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6dde7865-02ef-4557-948d-c5fb0ddf73ed-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "6dde7865-02ef-4557-948d-c5fb0ddf73ed" (UID: "6dde7865-02ef-4557-948d-c5fb0ddf73ed"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:22:22 crc kubenswrapper[4769]: I1125 10:22:22.290815 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:22:22 crc kubenswrapper[4769]: I1125 10:22:22.290883 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:22:22 crc kubenswrapper[4769]: I1125 10:22:22.314210 4769 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/6dde7865-02ef-4557-948d-c5fb0ddf73ed-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 25 10:22:22 crc kubenswrapper[4769]: I1125 10:22:22.314253 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gqmvz\" (UniqueName: \"kubernetes.io/projected/6dde7865-02ef-4557-948d-c5fb0ddf73ed-kube-api-access-gqmvz\") on node \"crc\" DevicePath \"\"" Nov 25 10:22:22 crc kubenswrapper[4769]: I1125 10:22:22.314294 4769 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/6dde7865-02ef-4557-948d-c5fb0ddf73ed-inventory-0\") on node \"crc\" DevicePath \"\"" Nov 25 10:22:22 crc kubenswrapper[4769]: I1125 10:22:22.327998 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" Nov 25 10:22:22 crc kubenswrapper[4769]: I1125 10:22:22.329379 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c0a1ea65b15cd591f5bac79049d2ab64564dd103d3a256253ad54595a631ef3b"} pod="openshift-machine-config-operator/machine-config-daemon-98mzt" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 10:22:22 crc kubenswrapper[4769]: I1125 10:22:22.329485 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" containerID="cri-o://c0a1ea65b15cd591f5bac79049d2ab64564dd103d3a256253ad54595a631ef3b" gracePeriod=600 Nov 25 10:22:22 crc kubenswrapper[4769]: E1125 10:22:22.465376 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:22:22 crc kubenswrapper[4769]: I1125 10:22:22.622951 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-7tjkl" event={"ID":"6dde7865-02ef-4557-948d-c5fb0ddf73ed","Type":"ContainerDied","Data":"7d74b3e15ae859fc19fb707306f622823aeea61781f87c15524dd89c2700b487"} Nov 25 10:22:22 crc kubenswrapper[4769]: I1125 10:22:22.623278 4769 
pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7d74b3e15ae859fc19fb707306f622823aeea61781f87c15524dd89c2700b487" Nov 25 10:22:22 crc kubenswrapper[4769]: I1125 10:22:22.623245 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-7tjkl" Nov 25 10:22:22 crc kubenswrapper[4769]: I1125 10:22:22.626623 4769 generic.go:334] "Generic (PLEG): container finished" podID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerID="c0a1ea65b15cd591f5bac79049d2ab64564dd103d3a256253ad54595a631ef3b" exitCode=0 Nov 25 10:22:22 crc kubenswrapper[4769]: I1125 10:22:22.626664 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" event={"ID":"d58c71b5-5dc4-45c1-9b58-9740a35d2256","Type":"ContainerDied","Data":"c0a1ea65b15cd591f5bac79049d2ab64564dd103d3a256253ad54595a631ef3b"} Nov 25 10:22:22 crc kubenswrapper[4769]: I1125 10:22:22.626731 4769 scope.go:117] "RemoveContainer" containerID="9806b9991b42941f4f09cbc1245d89fa1181b893dd6fc482374becd2c41fa1bf" Nov 25 10:22:22 crc kubenswrapper[4769]: I1125 10:22:22.628446 4769 scope.go:117] "RemoveContainer" containerID="c0a1ea65b15cd591f5bac79049d2ab64564dd103d3a256253ad54595a631ef3b" Nov 25 10:22:22 crc kubenswrapper[4769]: E1125 10:22:22.629169 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:22:22 crc kubenswrapper[4769]: I1125 10:22:22.785319 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-r8582"] Nov 25 10:22:22 crc kubenswrapper[4769]: E1125 10:22:22.787246 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6dde7865-02ef-4557-948d-c5fb0ddf73ed" containerName="ssh-known-hosts-edpm-deployment" Nov 25 10:22:22 crc kubenswrapper[4769]: I1125 10:22:22.787283 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="6dde7865-02ef-4557-948d-c5fb0ddf73ed" containerName="ssh-known-hosts-edpm-deployment" Nov 25 10:22:22 crc kubenswrapper[4769]: I1125 10:22:22.788793 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="6dde7865-02ef-4557-948d-c5fb0ddf73ed" containerName="ssh-known-hosts-edpm-deployment" Nov 25 10:22:22 crc kubenswrapper[4769]: I1125 10:22:22.789978 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-r8582" Nov 25 10:22:22 crc kubenswrapper[4769]: I1125 10:22:22.805001 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-skqtv" Nov 25 10:22:22 crc kubenswrapper[4769]: I1125 10:22:22.805061 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:22:22 crc kubenswrapper[4769]: I1125 10:22:22.805001 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 10:22:22 crc kubenswrapper[4769]: I1125 10:22:22.805011 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 10:22:22 crc kubenswrapper[4769]: I1125 10:22:22.806141 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-r8582"] Nov 25 10:22:22 crc kubenswrapper[4769]: I1125 10:22:22.930499 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d3cada76-b876-4f41-b8de-dc02456762e1-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-r8582\" (UID: \"d3cada76-b876-4f41-b8de-dc02456762e1\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-r8582" Nov 25 10:22:22 crc kubenswrapper[4769]: I1125 10:22:22.930667 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jvsvc\" (UniqueName: \"kubernetes.io/projected/d3cada76-b876-4f41-b8de-dc02456762e1-kube-api-access-jvsvc\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-r8582\" (UID: \"d3cada76-b876-4f41-b8de-dc02456762e1\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-r8582" Nov 25 10:22:22 crc kubenswrapper[4769]: I1125 10:22:22.930791 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d3cada76-b876-4f41-b8de-dc02456762e1-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-r8582\" (UID: \"d3cada76-b876-4f41-b8de-dc02456762e1\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-r8582" Nov 25 10:22:23 crc kubenswrapper[4769]: I1125 10:22:23.033569 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d3cada76-b876-4f41-b8de-dc02456762e1-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-r8582\" (UID: \"d3cada76-b876-4f41-b8de-dc02456762e1\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-r8582" Nov 25 10:22:23 crc kubenswrapper[4769]: I1125 10:22:23.033648 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jvsvc\" (UniqueName: \"kubernetes.io/projected/d3cada76-b876-4f41-b8de-dc02456762e1-kube-api-access-jvsvc\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-r8582\" (UID: \"d3cada76-b876-4f41-b8de-dc02456762e1\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-r8582" Nov 25 10:22:23 crc kubenswrapper[4769]: I1125 10:22:23.033716 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d3cada76-b876-4f41-b8de-dc02456762e1-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-r8582\" (UID: \"d3cada76-b876-4f41-b8de-dc02456762e1\") " 
pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-r8582" Nov 25 10:22:23 crc kubenswrapper[4769]: I1125 10:22:23.039462 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d3cada76-b876-4f41-b8de-dc02456762e1-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-r8582\" (UID: \"d3cada76-b876-4f41-b8de-dc02456762e1\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-r8582" Nov 25 10:22:23 crc kubenswrapper[4769]: I1125 10:22:23.041936 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d3cada76-b876-4f41-b8de-dc02456762e1-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-r8582\" (UID: \"d3cada76-b876-4f41-b8de-dc02456762e1\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-r8582" Nov 25 10:22:23 crc kubenswrapper[4769]: I1125 10:22:23.063280 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jvsvc\" (UniqueName: \"kubernetes.io/projected/d3cada76-b876-4f41-b8de-dc02456762e1-kube-api-access-jvsvc\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-r8582\" (UID: \"d3cada76-b876-4f41-b8de-dc02456762e1\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-r8582" Nov 25 10:22:23 crc kubenswrapper[4769]: I1125 10:22:23.118827 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-r8582" Nov 25 10:22:23 crc kubenswrapper[4769]: I1125 10:22:23.746261 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-r8582"] Nov 25 10:22:23 crc kubenswrapper[4769]: W1125 10:22:23.747752 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd3cada76_b876_4f41_b8de_dc02456762e1.slice/crio-9aaa1be4cd123036b760991fce6955f50423b8278722a44568e89e9207a79da0 WatchSource:0}: Error finding container 9aaa1be4cd123036b760991fce6955f50423b8278722a44568e89e9207a79da0: Status 404 returned error can't find the container with id 9aaa1be4cd123036b760991fce6955f50423b8278722a44568e89e9207a79da0 Nov 25 10:22:24 crc kubenswrapper[4769]: I1125 10:22:24.661210 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-r8582" event={"ID":"d3cada76-b876-4f41-b8de-dc02456762e1","Type":"ContainerStarted","Data":"9aaa1be4cd123036b760991fce6955f50423b8278722a44568e89e9207a79da0"} Nov 25 10:22:26 crc kubenswrapper[4769]: I1125 10:22:26.695163 4769 generic.go:334] "Generic (PLEG): container finished" podID="66bf18d5-d135-4e7d-8dde-62a8988b46c6" containerID="ba8d8850d77a436165a83868d9b2e09a8d54aca842e4c69e71ea252258e55f76" exitCode=0 Nov 25 10:22:26 crc kubenswrapper[4769]: I1125 10:22:26.695257 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7q7bz" event={"ID":"66bf18d5-d135-4e7d-8dde-62a8988b46c6","Type":"ContainerDied","Data":"ba8d8850d77a436165a83868d9b2e09a8d54aca842e4c69e71ea252258e55f76"} Nov 25 10:22:26 crc kubenswrapper[4769]: I1125 10:22:26.701680 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-r8582" event={"ID":"d3cada76-b876-4f41-b8de-dc02456762e1","Type":"ContainerStarted","Data":"5a0e354a2871d4a2567f80cef0f36359b9cfbc7f0e94d2d46d135c528f9d7bdb"} Nov 25 10:22:26 crc kubenswrapper[4769]: I1125 10:22:26.752211 4769 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-r8582" podStartSLOduration=3.096140166 podStartE2EDuration="4.752173515s" podCreationTimestamp="2025-11-25 10:22:22 +0000 UTC" firstStartedPulling="2025-11-25 10:22:23.751522272 +0000 UTC m=+2292.336494595" lastFinishedPulling="2025-11-25 10:22:25.407555621 +0000 UTC m=+2293.992527944" observedRunningTime="2025-11-25 10:22:26.746455272 +0000 UTC m=+2295.331427615" watchObservedRunningTime="2025-11-25 10:22:26.752173515 +0000 UTC m=+2295.337145868" Nov 25 10:22:28 crc kubenswrapper[4769]: I1125 10:22:28.729783 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7q7bz" event={"ID":"66bf18d5-d135-4e7d-8dde-62a8988b46c6","Type":"ContainerStarted","Data":"60ef3df9b2af058ecc5ab4f57f297cffc7262da1b0e7e34ef0e675f79e6aabb0"} Nov 25 10:22:28 crc kubenswrapper[4769]: I1125 10:22:28.768707 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-7q7bz" podStartSLOduration=3.915382231 podStartE2EDuration="11.768679946s" podCreationTimestamp="2025-11-25 10:22:17 +0000 UTC" firstStartedPulling="2025-11-25 10:22:19.568855245 +0000 UTC m=+2288.153827558" lastFinishedPulling="2025-11-25 10:22:27.42215295 +0000 UTC m=+2296.007125273" observedRunningTime="2025-11-25 10:22:28.758419012 +0000 UTC m=+2297.343391355" watchObservedRunningTime="2025-11-25 10:22:28.768679946 +0000 UTC m=+2297.353652269" Nov 25 10:22:34 crc kubenswrapper[4769]: I1125 10:22:34.805722 4769 generic.go:334] "Generic (PLEG): container finished" podID="d3cada76-b876-4f41-b8de-dc02456762e1" containerID="5a0e354a2871d4a2567f80cef0f36359b9cfbc7f0e94d2d46d135c528f9d7bdb" exitCode=0 Nov 25 10:22:34 crc kubenswrapper[4769]: I1125 10:22:34.805812 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-r8582" event={"ID":"d3cada76-b876-4f41-b8de-dc02456762e1","Type":"ContainerDied","Data":"5a0e354a2871d4a2567f80cef0f36359b9cfbc7f0e94d2d46d135c528f9d7bdb"} Nov 25 10:22:36 crc kubenswrapper[4769]: I1125 10:22:36.240021 4769 scope.go:117] "RemoveContainer" containerID="c0a1ea65b15cd591f5bac79049d2ab64564dd103d3a256253ad54595a631ef3b" Nov 25 10:22:36 crc kubenswrapper[4769]: E1125 10:22:36.241370 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:22:36 crc kubenswrapper[4769]: I1125 10:22:36.438714 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-r8582" Nov 25 10:22:36 crc kubenswrapper[4769]: I1125 10:22:36.531564 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jvsvc\" (UniqueName: \"kubernetes.io/projected/d3cada76-b876-4f41-b8de-dc02456762e1-kube-api-access-jvsvc\") pod \"d3cada76-b876-4f41-b8de-dc02456762e1\" (UID: \"d3cada76-b876-4f41-b8de-dc02456762e1\") " Nov 25 10:22:36 crc kubenswrapper[4769]: I1125 10:22:36.531849 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d3cada76-b876-4f41-b8de-dc02456762e1-inventory\") pod \"d3cada76-b876-4f41-b8de-dc02456762e1\" (UID: \"d3cada76-b876-4f41-b8de-dc02456762e1\") " Nov 25 10:22:36 crc kubenswrapper[4769]: I1125 10:22:36.532574 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d3cada76-b876-4f41-b8de-dc02456762e1-ssh-key\") pod \"d3cada76-b876-4f41-b8de-dc02456762e1\" (UID: \"d3cada76-b876-4f41-b8de-dc02456762e1\") " Nov 25 10:22:36 crc kubenswrapper[4769]: I1125 10:22:36.541387 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d3cada76-b876-4f41-b8de-dc02456762e1-kube-api-access-jvsvc" (OuterVolumeSpecName: "kube-api-access-jvsvc") pod "d3cada76-b876-4f41-b8de-dc02456762e1" (UID: "d3cada76-b876-4f41-b8de-dc02456762e1"). InnerVolumeSpecName "kube-api-access-jvsvc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:22:36 crc kubenswrapper[4769]: I1125 10:22:36.571111 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d3cada76-b876-4f41-b8de-dc02456762e1-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "d3cada76-b876-4f41-b8de-dc02456762e1" (UID: "d3cada76-b876-4f41-b8de-dc02456762e1"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:22:36 crc kubenswrapper[4769]: I1125 10:22:36.580363 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d3cada76-b876-4f41-b8de-dc02456762e1-inventory" (OuterVolumeSpecName: "inventory") pod "d3cada76-b876-4f41-b8de-dc02456762e1" (UID: "d3cada76-b876-4f41-b8de-dc02456762e1"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:22:36 crc kubenswrapper[4769]: I1125 10:22:36.634677 4769 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d3cada76-b876-4f41-b8de-dc02456762e1-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 10:22:36 crc kubenswrapper[4769]: I1125 10:22:36.634704 4769 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d3cada76-b876-4f41-b8de-dc02456762e1-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:22:36 crc kubenswrapper[4769]: I1125 10:22:36.634713 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jvsvc\" (UniqueName: \"kubernetes.io/projected/d3cada76-b876-4f41-b8de-dc02456762e1-kube-api-access-jvsvc\") on node \"crc\" DevicePath \"\"" Nov 25 10:22:36 crc kubenswrapper[4769]: I1125 10:22:36.841400 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-r8582" event={"ID":"d3cada76-b876-4f41-b8de-dc02456762e1","Type":"ContainerDied","Data":"9aaa1be4cd123036b760991fce6955f50423b8278722a44568e89e9207a79da0"} Nov 25 10:22:36 crc kubenswrapper[4769]: I1125 10:22:36.842392 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9aaa1be4cd123036b760991fce6955f50423b8278722a44568e89e9207a79da0" Nov 25 10:22:36 crc kubenswrapper[4769]: I1125 10:22:36.842487 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-r8582" Nov 25 10:22:36 crc kubenswrapper[4769]: I1125 10:22:36.935017 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-4snp4"] Nov 25 10:22:36 crc kubenswrapper[4769]: E1125 10:22:36.950790 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3cada76-b876-4f41-b8de-dc02456762e1" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 25 10:22:36 crc kubenswrapper[4769]: I1125 10:22:36.950834 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3cada76-b876-4f41-b8de-dc02456762e1" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 25 10:22:36 crc kubenswrapper[4769]: I1125 10:22:36.951275 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3cada76-b876-4f41-b8de-dc02456762e1" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 25 10:22:36 crc kubenswrapper[4769]: I1125 10:22:36.952562 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-4snp4" Nov 25 10:22:36 crc kubenswrapper[4769]: I1125 10:22:36.954520 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-4snp4"] Nov 25 10:22:36 crc kubenswrapper[4769]: I1125 10:22:36.955793 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 10:22:36 crc kubenswrapper[4769]: I1125 10:22:36.956196 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 10:22:36 crc kubenswrapper[4769]: I1125 10:22:36.956580 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-skqtv" Nov 25 10:22:36 crc kubenswrapper[4769]: I1125 10:22:36.956997 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:22:37 crc kubenswrapper[4769]: I1125 10:22:37.048160 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3ad5b758-8fdd-4274-887d-34859eb94736-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-4snp4\" (UID: \"3ad5b758-8fdd-4274-887d-34859eb94736\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-4snp4" Nov 25 10:22:37 crc kubenswrapper[4769]: I1125 10:22:37.048196 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3ad5b758-8fdd-4274-887d-34859eb94736-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-4snp4\" (UID: \"3ad5b758-8fdd-4274-887d-34859eb94736\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-4snp4" Nov 25 10:22:37 crc kubenswrapper[4769]: I1125 10:22:37.048572 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lcmqf\" (UniqueName: \"kubernetes.io/projected/3ad5b758-8fdd-4274-887d-34859eb94736-kube-api-access-lcmqf\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-4snp4\" (UID: \"3ad5b758-8fdd-4274-887d-34859eb94736\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-4snp4" Nov 25 10:22:37 crc kubenswrapper[4769]: I1125 10:22:37.150461 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3ad5b758-8fdd-4274-887d-34859eb94736-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-4snp4\" (UID: \"3ad5b758-8fdd-4274-887d-34859eb94736\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-4snp4" Nov 25 10:22:37 crc kubenswrapper[4769]: I1125 10:22:37.150530 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3ad5b758-8fdd-4274-887d-34859eb94736-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-4snp4\" (UID: \"3ad5b758-8fdd-4274-887d-34859eb94736\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-4snp4" Nov 25 10:22:37 crc kubenswrapper[4769]: I1125 10:22:37.150669 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lcmqf\" (UniqueName: \"kubernetes.io/projected/3ad5b758-8fdd-4274-887d-34859eb94736-kube-api-access-lcmqf\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-4snp4\" (UID: 
\"3ad5b758-8fdd-4274-887d-34859eb94736\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-4snp4" Nov 25 10:22:37 crc kubenswrapper[4769]: I1125 10:22:37.156165 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3ad5b758-8fdd-4274-887d-34859eb94736-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-4snp4\" (UID: \"3ad5b758-8fdd-4274-887d-34859eb94736\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-4snp4" Nov 25 10:22:37 crc kubenswrapper[4769]: I1125 10:22:37.156868 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3ad5b758-8fdd-4274-887d-34859eb94736-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-4snp4\" (UID: \"3ad5b758-8fdd-4274-887d-34859eb94736\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-4snp4" Nov 25 10:22:37 crc kubenswrapper[4769]: I1125 10:22:37.179678 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lcmqf\" (UniqueName: \"kubernetes.io/projected/3ad5b758-8fdd-4274-887d-34859eb94736-kube-api-access-lcmqf\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-4snp4\" (UID: \"3ad5b758-8fdd-4274-887d-34859eb94736\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-4snp4" Nov 25 10:22:37 crc kubenswrapper[4769]: I1125 10:22:37.290871 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-4snp4" Nov 25 10:22:37 crc kubenswrapper[4769]: I1125 10:22:37.937547 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-4snp4"] Nov 25 10:22:37 crc kubenswrapper[4769]: W1125 10:22:37.944027 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3ad5b758_8fdd_4274_887d_34859eb94736.slice/crio-cbca9b3ff3cb9552d5fce91dc2aee3a82c8ac1c323d6d958053785a9048a71a2 WatchSource:0}: Error finding container cbca9b3ff3cb9552d5fce91dc2aee3a82c8ac1c323d6d958053785a9048a71a2: Status 404 returned error can't find the container with id cbca9b3ff3cb9552d5fce91dc2aee3a82c8ac1c323d6d958053785a9048a71a2 Nov 25 10:22:38 crc kubenswrapper[4769]: I1125 10:22:38.121936 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-7q7bz" Nov 25 10:22:38 crc kubenswrapper[4769]: I1125 10:22:38.121994 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-7q7bz" Nov 25 10:22:38 crc kubenswrapper[4769]: I1125 10:22:38.176778 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-7q7bz" Nov 25 10:22:38 crc kubenswrapper[4769]: I1125 10:22:38.866798 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-4snp4" event={"ID":"3ad5b758-8fdd-4274-887d-34859eb94736","Type":"ContainerStarted","Data":"cbca9b3ff3cb9552d5fce91dc2aee3a82c8ac1c323d6d958053785a9048a71a2"} Nov 25 10:22:38 crc kubenswrapper[4769]: I1125 10:22:38.917181 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-7q7bz" Nov 25 10:22:38 crc kubenswrapper[4769]: I1125 10:22:38.973545 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-marketplace/redhat-operators-7q7bz"] Nov 25 10:22:39 crc kubenswrapper[4769]: I1125 10:22:39.878889 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-4snp4" event={"ID":"3ad5b758-8fdd-4274-887d-34859eb94736","Type":"ContainerStarted","Data":"722e224c2d3707ad1605372a2d357ad9341f4635e6ed2b0635781be9baff9fcd"} Nov 25 10:22:39 crc kubenswrapper[4769]: I1125 10:22:39.897641 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-4snp4" podStartSLOduration=3.318857234 podStartE2EDuration="3.897615904s" podCreationTimestamp="2025-11-25 10:22:36 +0000 UTC" firstStartedPulling="2025-11-25 10:22:37.948736168 +0000 UTC m=+2306.533708481" lastFinishedPulling="2025-11-25 10:22:38.527494808 +0000 UTC m=+2307.112467151" observedRunningTime="2025-11-25 10:22:39.895560169 +0000 UTC m=+2308.480532492" watchObservedRunningTime="2025-11-25 10:22:39.897615904 +0000 UTC m=+2308.482588247" Nov 25 10:22:40 crc kubenswrapper[4769]: I1125 10:22:40.891671 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-7q7bz" podUID="66bf18d5-d135-4e7d-8dde-62a8988b46c6" containerName="registry-server" containerID="cri-o://60ef3df9b2af058ecc5ab4f57f297cffc7262da1b0e7e34ef0e675f79e6aabb0" gracePeriod=2 Nov 25 10:22:41 crc kubenswrapper[4769]: I1125 10:22:41.601124 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7q7bz" Nov 25 10:22:41 crc kubenswrapper[4769]: I1125 10:22:41.668469 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j6wtm\" (UniqueName: \"kubernetes.io/projected/66bf18d5-d135-4e7d-8dde-62a8988b46c6-kube-api-access-j6wtm\") pod \"66bf18d5-d135-4e7d-8dde-62a8988b46c6\" (UID: \"66bf18d5-d135-4e7d-8dde-62a8988b46c6\") " Nov 25 10:22:41 crc kubenswrapper[4769]: I1125 10:22:41.668686 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66bf18d5-d135-4e7d-8dde-62a8988b46c6-utilities\") pod \"66bf18d5-d135-4e7d-8dde-62a8988b46c6\" (UID: \"66bf18d5-d135-4e7d-8dde-62a8988b46c6\") " Nov 25 10:22:41 crc kubenswrapper[4769]: I1125 10:22:41.668827 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66bf18d5-d135-4e7d-8dde-62a8988b46c6-catalog-content\") pod \"66bf18d5-d135-4e7d-8dde-62a8988b46c6\" (UID: \"66bf18d5-d135-4e7d-8dde-62a8988b46c6\") " Nov 25 10:22:41 crc kubenswrapper[4769]: I1125 10:22:41.670234 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/66bf18d5-d135-4e7d-8dde-62a8988b46c6-utilities" (OuterVolumeSpecName: "utilities") pod "66bf18d5-d135-4e7d-8dde-62a8988b46c6" (UID: "66bf18d5-d135-4e7d-8dde-62a8988b46c6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:22:41 crc kubenswrapper[4769]: I1125 10:22:41.677278 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/66bf18d5-d135-4e7d-8dde-62a8988b46c6-kube-api-access-j6wtm" (OuterVolumeSpecName: "kube-api-access-j6wtm") pod "66bf18d5-d135-4e7d-8dde-62a8988b46c6" (UID: "66bf18d5-d135-4e7d-8dde-62a8988b46c6"). InnerVolumeSpecName "kube-api-access-j6wtm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:22:41 crc kubenswrapper[4769]: I1125 10:22:41.772179 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j6wtm\" (UniqueName: \"kubernetes.io/projected/66bf18d5-d135-4e7d-8dde-62a8988b46c6-kube-api-access-j6wtm\") on node \"crc\" DevicePath \"\"" Nov 25 10:22:41 crc kubenswrapper[4769]: I1125 10:22:41.772216 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66bf18d5-d135-4e7d-8dde-62a8988b46c6-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:22:41 crc kubenswrapper[4769]: I1125 10:22:41.791286 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/66bf18d5-d135-4e7d-8dde-62a8988b46c6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "66bf18d5-d135-4e7d-8dde-62a8988b46c6" (UID: "66bf18d5-d135-4e7d-8dde-62a8988b46c6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:22:41 crc kubenswrapper[4769]: I1125 10:22:41.875374 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66bf18d5-d135-4e7d-8dde-62a8988b46c6-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:22:41 crc kubenswrapper[4769]: I1125 10:22:41.911498 4769 generic.go:334] "Generic (PLEG): container finished" podID="66bf18d5-d135-4e7d-8dde-62a8988b46c6" containerID="60ef3df9b2af058ecc5ab4f57f297cffc7262da1b0e7e34ef0e675f79e6aabb0" exitCode=0 Nov 25 10:22:41 crc kubenswrapper[4769]: I1125 10:22:41.911610 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7q7bz" event={"ID":"66bf18d5-d135-4e7d-8dde-62a8988b46c6","Type":"ContainerDied","Data":"60ef3df9b2af058ecc5ab4f57f297cffc7262da1b0e7e34ef0e675f79e6aabb0"} Nov 25 10:22:41 crc kubenswrapper[4769]: I1125 10:22:41.911628 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-7q7bz" Nov 25 10:22:41 crc kubenswrapper[4769]: I1125 10:22:41.911652 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7q7bz" event={"ID":"66bf18d5-d135-4e7d-8dde-62a8988b46c6","Type":"ContainerDied","Data":"73645827e43c786b148b0091a02b4b1549ff076e85af6b22bcb7ada86ee8a8f2"} Nov 25 10:22:41 crc kubenswrapper[4769]: I1125 10:22:41.911669 4769 scope.go:117] "RemoveContainer" containerID="60ef3df9b2af058ecc5ab4f57f297cffc7262da1b0e7e34ef0e675f79e6aabb0" Nov 25 10:22:41 crc kubenswrapper[4769]: I1125 10:22:41.971161 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-7q7bz"] Nov 25 10:22:41 crc kubenswrapper[4769]: I1125 10:22:41.976096 4769 scope.go:117] "RemoveContainer" containerID="ba8d8850d77a436165a83868d9b2e09a8d54aca842e4c69e71ea252258e55f76" Nov 25 10:22:41 crc kubenswrapper[4769]: I1125 10:22:41.981327 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-7q7bz"] Nov 25 10:22:42 crc kubenswrapper[4769]: I1125 10:22:42.014648 4769 scope.go:117] "RemoveContainer" containerID="54c02b08570aa8e84ea671a38a1cf1825a7ff8aae976a358e5bcc446d534e06a" Nov 25 10:22:42 crc kubenswrapper[4769]: I1125 10:22:42.077510 4769 scope.go:117] "RemoveContainer" containerID="60ef3df9b2af058ecc5ab4f57f297cffc7262da1b0e7e34ef0e675f79e6aabb0" Nov 25 10:22:42 crc kubenswrapper[4769]: E1125 10:22:42.078474 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"60ef3df9b2af058ecc5ab4f57f297cffc7262da1b0e7e34ef0e675f79e6aabb0\": container with ID starting with 60ef3df9b2af058ecc5ab4f57f297cffc7262da1b0e7e34ef0e675f79e6aabb0 not found: ID does not exist" containerID="60ef3df9b2af058ecc5ab4f57f297cffc7262da1b0e7e34ef0e675f79e6aabb0" Nov 25 10:22:42 crc kubenswrapper[4769]: I1125 10:22:42.078516 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"60ef3df9b2af058ecc5ab4f57f297cffc7262da1b0e7e34ef0e675f79e6aabb0"} err="failed to get container status \"60ef3df9b2af058ecc5ab4f57f297cffc7262da1b0e7e34ef0e675f79e6aabb0\": rpc error: code = NotFound desc = could not find container \"60ef3df9b2af058ecc5ab4f57f297cffc7262da1b0e7e34ef0e675f79e6aabb0\": container with ID starting with 60ef3df9b2af058ecc5ab4f57f297cffc7262da1b0e7e34ef0e675f79e6aabb0 not found: ID does not exist" Nov 25 10:22:42 crc kubenswrapper[4769]: I1125 10:22:42.078543 4769 scope.go:117] "RemoveContainer" containerID="ba8d8850d77a436165a83868d9b2e09a8d54aca842e4c69e71ea252258e55f76" Nov 25 10:22:42 crc kubenswrapper[4769]: E1125 10:22:42.080605 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ba8d8850d77a436165a83868d9b2e09a8d54aca842e4c69e71ea252258e55f76\": container with ID starting with ba8d8850d77a436165a83868d9b2e09a8d54aca842e4c69e71ea252258e55f76 not found: ID does not exist" containerID="ba8d8850d77a436165a83868d9b2e09a8d54aca842e4c69e71ea252258e55f76" Nov 25 10:22:42 crc kubenswrapper[4769]: I1125 10:22:42.080643 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba8d8850d77a436165a83868d9b2e09a8d54aca842e4c69e71ea252258e55f76"} err="failed to get container status \"ba8d8850d77a436165a83868d9b2e09a8d54aca842e4c69e71ea252258e55f76\": rpc error: code = NotFound desc = could not find container 
\"ba8d8850d77a436165a83868d9b2e09a8d54aca842e4c69e71ea252258e55f76\": container with ID starting with ba8d8850d77a436165a83868d9b2e09a8d54aca842e4c69e71ea252258e55f76 not found: ID does not exist" Nov 25 10:22:42 crc kubenswrapper[4769]: I1125 10:22:42.080662 4769 scope.go:117] "RemoveContainer" containerID="54c02b08570aa8e84ea671a38a1cf1825a7ff8aae976a358e5bcc446d534e06a" Nov 25 10:22:42 crc kubenswrapper[4769]: E1125 10:22:42.081126 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"54c02b08570aa8e84ea671a38a1cf1825a7ff8aae976a358e5bcc446d534e06a\": container with ID starting with 54c02b08570aa8e84ea671a38a1cf1825a7ff8aae976a358e5bcc446d534e06a not found: ID does not exist" containerID="54c02b08570aa8e84ea671a38a1cf1825a7ff8aae976a358e5bcc446d534e06a" Nov 25 10:22:42 crc kubenswrapper[4769]: I1125 10:22:42.081187 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"54c02b08570aa8e84ea671a38a1cf1825a7ff8aae976a358e5bcc446d534e06a"} err="failed to get container status \"54c02b08570aa8e84ea671a38a1cf1825a7ff8aae976a358e5bcc446d534e06a\": rpc error: code = NotFound desc = could not find container \"54c02b08570aa8e84ea671a38a1cf1825a7ff8aae976a358e5bcc446d534e06a\": container with ID starting with 54c02b08570aa8e84ea671a38a1cf1825a7ff8aae976a358e5bcc446d534e06a not found: ID does not exist" Nov 25 10:22:42 crc kubenswrapper[4769]: I1125 10:22:42.254333 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="66bf18d5-d135-4e7d-8dde-62a8988b46c6" path="/var/lib/kubelet/pods/66bf18d5-d135-4e7d-8dde-62a8988b46c6/volumes" Nov 25 10:22:47 crc kubenswrapper[4769]: I1125 10:22:47.566002 4769 scope.go:117] "RemoveContainer" containerID="d522547551044cba146372aa9d9dcd78b1746a080a69d63ba20931ef2cd04269" Nov 25 10:22:48 crc kubenswrapper[4769]: I1125 10:22:48.237600 4769 scope.go:117] "RemoveContainer" containerID="c0a1ea65b15cd591f5bac79049d2ab64564dd103d3a256253ad54595a631ef3b" Nov 25 10:22:48 crc kubenswrapper[4769]: E1125 10:22:48.238406 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:22:50 crc kubenswrapper[4769]: I1125 10:22:50.026957 4769 generic.go:334] "Generic (PLEG): container finished" podID="3ad5b758-8fdd-4274-887d-34859eb94736" containerID="722e224c2d3707ad1605372a2d357ad9341f4635e6ed2b0635781be9baff9fcd" exitCode=0 Nov 25 10:22:50 crc kubenswrapper[4769]: I1125 10:22:50.027151 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-4snp4" event={"ID":"3ad5b758-8fdd-4274-887d-34859eb94736","Type":"ContainerDied","Data":"722e224c2d3707ad1605372a2d357ad9341f4635e6ed2b0635781be9baff9fcd"} Nov 25 10:22:51 crc kubenswrapper[4769]: I1125 10:22:51.624697 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-4snp4" Nov 25 10:22:51 crc kubenswrapper[4769]: I1125 10:22:51.755255 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lcmqf\" (UniqueName: \"kubernetes.io/projected/3ad5b758-8fdd-4274-887d-34859eb94736-kube-api-access-lcmqf\") pod \"3ad5b758-8fdd-4274-887d-34859eb94736\" (UID: \"3ad5b758-8fdd-4274-887d-34859eb94736\") " Nov 25 10:22:51 crc kubenswrapper[4769]: I1125 10:22:51.755514 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3ad5b758-8fdd-4274-887d-34859eb94736-ssh-key\") pod \"3ad5b758-8fdd-4274-887d-34859eb94736\" (UID: \"3ad5b758-8fdd-4274-887d-34859eb94736\") " Nov 25 10:22:51 crc kubenswrapper[4769]: I1125 10:22:51.755638 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3ad5b758-8fdd-4274-887d-34859eb94736-inventory\") pod \"3ad5b758-8fdd-4274-887d-34859eb94736\" (UID: \"3ad5b758-8fdd-4274-887d-34859eb94736\") " Nov 25 10:22:51 crc kubenswrapper[4769]: I1125 10:22:51.763182 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ad5b758-8fdd-4274-887d-34859eb94736-kube-api-access-lcmqf" (OuterVolumeSpecName: "kube-api-access-lcmqf") pod "3ad5b758-8fdd-4274-887d-34859eb94736" (UID: "3ad5b758-8fdd-4274-887d-34859eb94736"). InnerVolumeSpecName "kube-api-access-lcmqf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:22:51 crc kubenswrapper[4769]: I1125 10:22:51.807337 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ad5b758-8fdd-4274-887d-34859eb94736-inventory" (OuterVolumeSpecName: "inventory") pod "3ad5b758-8fdd-4274-887d-34859eb94736" (UID: "3ad5b758-8fdd-4274-887d-34859eb94736"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:22:51 crc kubenswrapper[4769]: I1125 10:22:51.834343 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ad5b758-8fdd-4274-887d-34859eb94736-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "3ad5b758-8fdd-4274-887d-34859eb94736" (UID: "3ad5b758-8fdd-4274-887d-34859eb94736"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:22:51 crc kubenswrapper[4769]: I1125 10:22:51.859625 4769 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3ad5b758-8fdd-4274-887d-34859eb94736-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:22:51 crc kubenswrapper[4769]: I1125 10:22:51.859695 4769 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3ad5b758-8fdd-4274-887d-34859eb94736-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 10:22:51 crc kubenswrapper[4769]: I1125 10:22:51.859708 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lcmqf\" (UniqueName: \"kubernetes.io/projected/3ad5b758-8fdd-4274-887d-34859eb94736-kube-api-access-lcmqf\") on node \"crc\" DevicePath \"\"" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.059407 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-4snp4" event={"ID":"3ad5b758-8fdd-4274-887d-34859eb94736","Type":"ContainerDied","Data":"cbca9b3ff3cb9552d5fce91dc2aee3a82c8ac1c323d6d958053785a9048a71a2"} Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.059510 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-4snp4" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.059536 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cbca9b3ff3cb9552d5fce91dc2aee3a82c8ac1c323d6d958053785a9048a71a2" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.144039 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j"] Nov 25 10:22:52 crc kubenswrapper[4769]: E1125 10:22:52.144567 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66bf18d5-d135-4e7d-8dde-62a8988b46c6" containerName="registry-server" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.144590 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="66bf18d5-d135-4e7d-8dde-62a8988b46c6" containerName="registry-server" Nov 25 10:22:52 crc kubenswrapper[4769]: E1125 10:22:52.144625 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66bf18d5-d135-4e7d-8dde-62a8988b46c6" containerName="extract-content" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.144634 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="66bf18d5-d135-4e7d-8dde-62a8988b46c6" containerName="extract-content" Nov 25 10:22:52 crc kubenswrapper[4769]: E1125 10:22:52.144649 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ad5b758-8fdd-4274-887d-34859eb94736" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.144659 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ad5b758-8fdd-4274-887d-34859eb94736" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 25 10:22:52 crc kubenswrapper[4769]: E1125 10:22:52.144688 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66bf18d5-d135-4e7d-8dde-62a8988b46c6" containerName="extract-utilities" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.144696 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="66bf18d5-d135-4e7d-8dde-62a8988b46c6" containerName="extract-utilities" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.145003 4769 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="66bf18d5-d135-4e7d-8dde-62a8988b46c6" containerName="registry-server" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.145034 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ad5b758-8fdd-4274-887d-34859eb94736" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.145933 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.156129 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.156329 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-default-certs-0" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.157526 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.157588 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.158002 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-skqtv" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.158250 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.158324 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.158559 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.159239 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j"] Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.160741 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.270388 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cfbf9a07-a076-48a8-a458-743efaef316d-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.270451 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.270624 4769 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.270674 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.270708 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lzbqs\" (UniqueName: \"kubernetes.io/projected/cfbf9a07-a076-48a8-a458-743efaef316d-kube-api-access-lzbqs\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.270739 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cfbf9a07-a076-48a8-a458-743efaef316d-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.270766 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.270874 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.271010 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cfbf9a07-a076-48a8-a458-743efaef316d-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.271072 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.271115 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cfbf9a07-a076-48a8-a458-743efaef316d-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.271142 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.271259 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.271307 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cfbf9a07-a076-48a8-a458-743efaef316d-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.271334 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-telemetry-power-monitoring-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.271382 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.374352 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: 
\"kubernetes.io/projected/cfbf9a07-a076-48a8-a458-743efaef316d-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.374525 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.374591 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cfbf9a07-a076-48a8-a458-743efaef316d-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.374631 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.374786 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.374833 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cfbf9a07-a076-48a8-a458-743efaef316d-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.374877 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-telemetry-power-monitoring-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.374946 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-neutron-metadata-combined-ca-bundle\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.375092 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cfbf9a07-a076-48a8-a458-743efaef316d-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.375149 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.375288 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.375776 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.375837 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lzbqs\" (UniqueName: \"kubernetes.io/projected/cfbf9a07-a076-48a8-a458-743efaef316d-kube-api-access-lzbqs\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.375882 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cfbf9a07-a076-48a8-a458-743efaef316d-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.375919 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.376104 4769 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.379250 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cfbf9a07-a076-48a8-a458-743efaef316d-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.379569 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cfbf9a07-a076-48a8-a458-743efaef316d-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.381855 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cfbf9a07-a076-48a8-a458-743efaef316d-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.382265 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-telemetry-power-monitoring-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.382560 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.382912 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.383255 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: 
\"kubernetes.io/projected/cfbf9a07-a076-48a8-a458-743efaef316d-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.383671 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.383938 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.384147 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.384828 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.385196 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.386559 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cfbf9a07-a076-48a8-a458-743efaef316d-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.387332 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 
10:22:52.387503 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.398388 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lzbqs\" (UniqueName: \"kubernetes.io/projected/cfbf9a07-a076-48a8-a458-743efaef316d-kube-api-access-lzbqs\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:52 crc kubenswrapper[4769]: I1125 10:22:52.466071 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:22:53 crc kubenswrapper[4769]: I1125 10:22:53.118477 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j"] Nov 25 10:22:54 crc kubenswrapper[4769]: I1125 10:22:54.087031 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" event={"ID":"cfbf9a07-a076-48a8-a458-743efaef316d","Type":"ContainerStarted","Data":"aff35c7581729e1303de93334dd00372f109c4f45260c9616276454c186ab0ad"} Nov 25 10:22:54 crc kubenswrapper[4769]: I1125 10:22:54.087794 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" event={"ID":"cfbf9a07-a076-48a8-a458-743efaef316d","Type":"ContainerStarted","Data":"73ccbb235d3af169d6a78e179cf8fb7c4e855a44883eb74891f288b4c7c0bfa9"} Nov 25 10:22:54 crc kubenswrapper[4769]: I1125 10:22:54.114476 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" podStartSLOduration=1.693338341 podStartE2EDuration="2.114445853s" podCreationTimestamp="2025-11-25 10:22:52 +0000 UTC" firstStartedPulling="2025-11-25 10:22:53.134991796 +0000 UTC m=+2321.719964109" lastFinishedPulling="2025-11-25 10:22:53.556099308 +0000 UTC m=+2322.141071621" observedRunningTime="2025-11-25 10:22:54.10610814 +0000 UTC m=+2322.691080453" watchObservedRunningTime="2025-11-25 10:22:54.114445853 +0000 UTC m=+2322.699418186" Nov 25 10:23:03 crc kubenswrapper[4769]: I1125 10:23:03.238618 4769 scope.go:117] "RemoveContainer" containerID="c0a1ea65b15cd591f5bac79049d2ab64564dd103d3a256253ad54595a631ef3b" Nov 25 10:23:03 crc kubenswrapper[4769]: E1125 10:23:03.239623 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:23:17 crc kubenswrapper[4769]: I1125 10:23:17.238208 4769 scope.go:117] "RemoveContainer" containerID="c0a1ea65b15cd591f5bac79049d2ab64564dd103d3a256253ad54595a631ef3b" Nov 25 10:23:17 crc kubenswrapper[4769]: E1125 10:23:17.240689 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:23:29 crc kubenswrapper[4769]: I1125 10:23:29.237471 4769 scope.go:117] "RemoveContainer" containerID="c0a1ea65b15cd591f5bac79049d2ab64564dd103d3a256253ad54595a631ef3b" Nov 25 10:23:29 crc kubenswrapper[4769]: E1125 10:23:29.238465 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:23:31 crc kubenswrapper[4769]: I1125 10:23:31.370128 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-d75lq"] Nov 25 10:23:31 crc kubenswrapper[4769]: I1125 10:23:31.374112 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d75lq" Nov 25 10:23:31 crc kubenswrapper[4769]: I1125 10:23:31.393757 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lstcv\" (UniqueName: \"kubernetes.io/projected/94a5185d-3f16-48ba-91ee-97c4f153ad27-kube-api-access-lstcv\") pod \"redhat-marketplace-d75lq\" (UID: \"94a5185d-3f16-48ba-91ee-97c4f153ad27\") " pod="openshift-marketplace/redhat-marketplace-d75lq" Nov 25 10:23:31 crc kubenswrapper[4769]: I1125 10:23:31.393879 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94a5185d-3f16-48ba-91ee-97c4f153ad27-utilities\") pod \"redhat-marketplace-d75lq\" (UID: \"94a5185d-3f16-48ba-91ee-97c4f153ad27\") " pod="openshift-marketplace/redhat-marketplace-d75lq" Nov 25 10:23:31 crc kubenswrapper[4769]: I1125 10:23:31.393916 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94a5185d-3f16-48ba-91ee-97c4f153ad27-catalog-content\") pod \"redhat-marketplace-d75lq\" (UID: \"94a5185d-3f16-48ba-91ee-97c4f153ad27\") " pod="openshift-marketplace/redhat-marketplace-d75lq" Nov 25 10:23:31 crc kubenswrapper[4769]: I1125 10:23:31.409422 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-d75lq"] Nov 25 10:23:31 crc kubenswrapper[4769]: I1125 10:23:31.497583 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lstcv\" (UniqueName: \"kubernetes.io/projected/94a5185d-3f16-48ba-91ee-97c4f153ad27-kube-api-access-lstcv\") pod \"redhat-marketplace-d75lq\" (UID: \"94a5185d-3f16-48ba-91ee-97c4f153ad27\") " pod="openshift-marketplace/redhat-marketplace-d75lq" Nov 25 10:23:31 crc kubenswrapper[4769]: I1125 10:23:31.497664 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94a5185d-3f16-48ba-91ee-97c4f153ad27-utilities\") pod \"redhat-marketplace-d75lq\" (UID: \"94a5185d-3f16-48ba-91ee-97c4f153ad27\") " 
pod="openshift-marketplace/redhat-marketplace-d75lq" Nov 25 10:23:31 crc kubenswrapper[4769]: I1125 10:23:31.497706 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94a5185d-3f16-48ba-91ee-97c4f153ad27-catalog-content\") pod \"redhat-marketplace-d75lq\" (UID: \"94a5185d-3f16-48ba-91ee-97c4f153ad27\") " pod="openshift-marketplace/redhat-marketplace-d75lq" Nov 25 10:23:31 crc kubenswrapper[4769]: I1125 10:23:31.498152 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94a5185d-3f16-48ba-91ee-97c4f153ad27-utilities\") pod \"redhat-marketplace-d75lq\" (UID: \"94a5185d-3f16-48ba-91ee-97c4f153ad27\") " pod="openshift-marketplace/redhat-marketplace-d75lq" Nov 25 10:23:31 crc kubenswrapper[4769]: I1125 10:23:31.498229 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94a5185d-3f16-48ba-91ee-97c4f153ad27-catalog-content\") pod \"redhat-marketplace-d75lq\" (UID: \"94a5185d-3f16-48ba-91ee-97c4f153ad27\") " pod="openshift-marketplace/redhat-marketplace-d75lq" Nov 25 10:23:31 crc kubenswrapper[4769]: I1125 10:23:31.530053 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lstcv\" (UniqueName: \"kubernetes.io/projected/94a5185d-3f16-48ba-91ee-97c4f153ad27-kube-api-access-lstcv\") pod \"redhat-marketplace-d75lq\" (UID: \"94a5185d-3f16-48ba-91ee-97c4f153ad27\") " pod="openshift-marketplace/redhat-marketplace-d75lq" Nov 25 10:23:31 crc kubenswrapper[4769]: I1125 10:23:31.713995 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d75lq" Nov 25 10:23:32 crc kubenswrapper[4769]: I1125 10:23:32.202107 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-d75lq"] Nov 25 10:23:32 crc kubenswrapper[4769]: I1125 10:23:32.552905 4769 generic.go:334] "Generic (PLEG): container finished" podID="94a5185d-3f16-48ba-91ee-97c4f153ad27" containerID="50730af583309cd266928cf05422d54ae29a164bb39fbec596d6bc696fa72910" exitCode=0 Nov 25 10:23:32 crc kubenswrapper[4769]: I1125 10:23:32.553012 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d75lq" event={"ID":"94a5185d-3f16-48ba-91ee-97c4f153ad27","Type":"ContainerDied","Data":"50730af583309cd266928cf05422d54ae29a164bb39fbec596d6bc696fa72910"} Nov 25 10:23:32 crc kubenswrapper[4769]: I1125 10:23:32.554642 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d75lq" event={"ID":"94a5185d-3f16-48ba-91ee-97c4f153ad27","Type":"ContainerStarted","Data":"506b95bdb8297ce2b6e86f202e86a6d43acf8e7ff05db6732310fdb793f643a3"} Nov 25 10:23:34 crc kubenswrapper[4769]: I1125 10:23:34.580006 4769 generic.go:334] "Generic (PLEG): container finished" podID="94a5185d-3f16-48ba-91ee-97c4f153ad27" containerID="70c6d860c0b914b1119f736a36a4e598022228dca5b58794011e3c5eaf47d93a" exitCode=0 Nov 25 10:23:34 crc kubenswrapper[4769]: I1125 10:23:34.580325 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d75lq" event={"ID":"94a5185d-3f16-48ba-91ee-97c4f153ad27","Type":"ContainerDied","Data":"70c6d860c0b914b1119f736a36a4e598022228dca5b58794011e3c5eaf47d93a"} Nov 25 10:23:35 crc kubenswrapper[4769]: I1125 10:23:35.595769 4769 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-marketplace/redhat-marketplace-d75lq" event={"ID":"94a5185d-3f16-48ba-91ee-97c4f153ad27","Type":"ContainerStarted","Data":"c108e1c42ea601bb07963a47a0275f5fce91b35ed12f80c030e4890dadb03c46"} Nov 25 10:23:35 crc kubenswrapper[4769]: I1125 10:23:35.618306 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-d75lq" podStartSLOduration=2.187019557 podStartE2EDuration="4.61828147s" podCreationTimestamp="2025-11-25 10:23:31 +0000 UTC" firstStartedPulling="2025-11-25 10:23:32.555067947 +0000 UTC m=+2361.140040260" lastFinishedPulling="2025-11-25 10:23:34.98632986 +0000 UTC m=+2363.571302173" observedRunningTime="2025-11-25 10:23:35.611567161 +0000 UTC m=+2364.196539474" watchObservedRunningTime="2025-11-25 10:23:35.61828147 +0000 UTC m=+2364.203253783" Nov 25 10:23:41 crc kubenswrapper[4769]: I1125 10:23:41.238176 4769 scope.go:117] "RemoveContainer" containerID="c0a1ea65b15cd591f5bac79049d2ab64564dd103d3a256253ad54595a631ef3b" Nov 25 10:23:41 crc kubenswrapper[4769]: E1125 10:23:41.239298 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:23:41 crc kubenswrapper[4769]: I1125 10:23:41.705484 4769 generic.go:334] "Generic (PLEG): container finished" podID="cfbf9a07-a076-48a8-a458-743efaef316d" containerID="aff35c7581729e1303de93334dd00372f109c4f45260c9616276454c186ab0ad" exitCode=0 Nov 25 10:23:41 crc kubenswrapper[4769]: I1125 10:23:41.705573 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" event={"ID":"cfbf9a07-a076-48a8-a458-743efaef316d","Type":"ContainerDied","Data":"aff35c7581729e1303de93334dd00372f109c4f45260c9616276454c186ab0ad"} Nov 25 10:23:41 crc kubenswrapper[4769]: I1125 10:23:41.714913 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-d75lq" Nov 25 10:23:41 crc kubenswrapper[4769]: I1125 10:23:41.714947 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-d75lq" Nov 25 10:23:41 crc kubenswrapper[4769]: I1125 10:23:41.798148 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-d75lq" Nov 25 10:23:42 crc kubenswrapper[4769]: I1125 10:23:42.800903 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-d75lq" Nov 25 10:23:42 crc kubenswrapper[4769]: I1125 10:23:42.875277 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-d75lq"] Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.230071 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.279790 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-telemetry-combined-ca-bundle\") pod \"cfbf9a07-a076-48a8-a458-743efaef316d\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.279990 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-nova-combined-ca-bundle\") pod \"cfbf9a07-a076-48a8-a458-743efaef316d\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.280075 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cfbf9a07-a076-48a8-a458-743efaef316d-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"cfbf9a07-a076-48a8-a458-743efaef316d\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.280135 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzbqs\" (UniqueName: \"kubernetes.io/projected/cfbf9a07-a076-48a8-a458-743efaef316d-kube-api-access-lzbqs\") pod \"cfbf9a07-a076-48a8-a458-743efaef316d\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.280209 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-ovn-combined-ca-bundle\") pod \"cfbf9a07-a076-48a8-a458-743efaef316d\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.281158 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cfbf9a07-a076-48a8-a458-743efaef316d-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"cfbf9a07-a076-48a8-a458-743efaef316d\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.281217 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-inventory\") pod \"cfbf9a07-a076-48a8-a458-743efaef316d\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.281241 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cfbf9a07-a076-48a8-a458-743efaef316d-openstack-edpm-ipam-ovn-default-certs-0\") pod \"cfbf9a07-a076-48a8-a458-743efaef316d\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.281268 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cfbf9a07-a076-48a8-a458-743efaef316d-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") pod 
\"cfbf9a07-a076-48a8-a458-743efaef316d\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.281318 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-ssh-key\") pod \"cfbf9a07-a076-48a8-a458-743efaef316d\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.281345 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-neutron-metadata-combined-ca-bundle\") pod \"cfbf9a07-a076-48a8-a458-743efaef316d\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.281390 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-repo-setup-combined-ca-bundle\") pod \"cfbf9a07-a076-48a8-a458-743efaef316d\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.281437 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-bootstrap-combined-ca-bundle\") pod \"cfbf9a07-a076-48a8-a458-743efaef316d\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.281485 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-telemetry-power-monitoring-combined-ca-bundle\") pod \"cfbf9a07-a076-48a8-a458-743efaef316d\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.281581 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-libvirt-combined-ca-bundle\") pod \"cfbf9a07-a076-48a8-a458-743efaef316d\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.281685 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cfbf9a07-a076-48a8-a458-743efaef316d-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"cfbf9a07-a076-48a8-a458-743efaef316d\" (UID: \"cfbf9a07-a076-48a8-a458-743efaef316d\") " Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.286847 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "cfbf9a07-a076-48a8-a458-743efaef316d" (UID: "cfbf9a07-a076-48a8-a458-743efaef316d"). InnerVolumeSpecName "ovn-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.286859 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cfbf9a07-a076-48a8-a458-743efaef316d-kube-api-access-lzbqs" (OuterVolumeSpecName: "kube-api-access-lzbqs") pod "cfbf9a07-a076-48a8-a458-743efaef316d" (UID: "cfbf9a07-a076-48a8-a458-743efaef316d"). InnerVolumeSpecName "kube-api-access-lzbqs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.287873 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "cfbf9a07-a076-48a8-a458-743efaef316d" (UID: "cfbf9a07-a076-48a8-a458-743efaef316d"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.289071 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cfbf9a07-a076-48a8-a458-743efaef316d-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "cfbf9a07-a076-48a8-a458-743efaef316d" (UID: "cfbf9a07-a076-48a8-a458-743efaef316d"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.289440 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cfbf9a07-a076-48a8-a458-743efaef316d-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0") pod "cfbf9a07-a076-48a8-a458-743efaef316d" (UID: "cfbf9a07-a076-48a8-a458-743efaef316d"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.290326 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cfbf9a07-a076-48a8-a458-743efaef316d-openstack-edpm-ipam-telemetry-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-default-certs-0") pod "cfbf9a07-a076-48a8-a458-743efaef316d" (UID: "cfbf9a07-a076-48a8-a458-743efaef316d"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.291198 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cfbf9a07-a076-48a8-a458-743efaef316d-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "cfbf9a07-a076-48a8-a458-743efaef316d" (UID: "cfbf9a07-a076-48a8-a458-743efaef316d"). InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.293809 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "cfbf9a07-a076-48a8-a458-743efaef316d" (UID: "cfbf9a07-a076-48a8-a458-743efaef316d"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.293869 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "cfbf9a07-a076-48a8-a458-743efaef316d" (UID: "cfbf9a07-a076-48a8-a458-743efaef316d"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.294078 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-telemetry-power-monitoring-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-power-monitoring-combined-ca-bundle") pod "cfbf9a07-a076-48a8-a458-743efaef316d" (UID: "cfbf9a07-a076-48a8-a458-743efaef316d"). InnerVolumeSpecName "telemetry-power-monitoring-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.294112 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "cfbf9a07-a076-48a8-a458-743efaef316d" (UID: "cfbf9a07-a076-48a8-a458-743efaef316d"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.294590 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "cfbf9a07-a076-48a8-a458-743efaef316d" (UID: "cfbf9a07-a076-48a8-a458-743efaef316d"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.298871 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cfbf9a07-a076-48a8-a458-743efaef316d-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "cfbf9a07-a076-48a8-a458-743efaef316d" (UID: "cfbf9a07-a076-48a8-a458-743efaef316d"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.311014 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "cfbf9a07-a076-48a8-a458-743efaef316d" (UID: "cfbf9a07-a076-48a8-a458-743efaef316d"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.321912 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-inventory" (OuterVolumeSpecName: "inventory") pod "cfbf9a07-a076-48a8-a458-743efaef316d" (UID: "cfbf9a07-a076-48a8-a458-743efaef316d"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.328664 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "cfbf9a07-a076-48a8-a458-743efaef316d" (UID: "cfbf9a07-a076-48a8-a458-743efaef316d"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.385892 4769 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cfbf9a07-a076-48a8-a458-743efaef316d-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.386248 4769 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.386263 4769 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cfbf9a07-a076-48a8-a458-743efaef316d-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.386282 4769 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cfbf9a07-a076-48a8-a458-743efaef316d-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.386297 4769 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.386411 4769 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.386428 4769 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.386441 4769 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.386454 4769 reconciler_common.go:293] "Volume detached for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-telemetry-power-monitoring-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.386467 4769 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.386479 4769 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cfbf9a07-a076-48a8-a458-743efaef316d-openstack-edpm-ipam-telemetry-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.386496 4769 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.386509 4769 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.386520 4769 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cfbf9a07-a076-48a8-a458-743efaef316d-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.386532 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzbqs\" (UniqueName: \"kubernetes.io/projected/cfbf9a07-a076-48a8-a458-743efaef316d-kube-api-access-lzbqs\") on node \"crc\" DevicePath \"\"" Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.386544 4769 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfbf9a07-a076-48a8-a458-743efaef316d-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.736566 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" event={"ID":"cfbf9a07-a076-48a8-a458-743efaef316d","Type":"ContainerDied","Data":"73ccbb235d3af169d6a78e179cf8fb7c4e855a44883eb74891f288b4c7c0bfa9"} Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.736691 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="73ccbb235d3af169d6a78e179cf8fb7c4e855a44883eb74891f288b4c7c0bfa9" Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.736629 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j" Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.873985 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-dxprm"] Nov 25 10:23:43 crc kubenswrapper[4769]: E1125 10:23:43.874577 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cfbf9a07-a076-48a8-a458-743efaef316d" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.874595 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="cfbf9a07-a076-48a8-a458-743efaef316d" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.874871 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="cfbf9a07-a076-48a8-a458-743efaef316d" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.876650 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dxprm" Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.879710 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.879775 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-skqtv" Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.879727 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.879934 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.880051 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 10:23:43 crc kubenswrapper[4769]: I1125 10:23:43.901355 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-dxprm"] Nov 25 10:23:44 crc kubenswrapper[4769]: I1125 10:23:44.008290 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/92aecc12-4568-4b69-bda9-58101a2b6083-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-dxprm\" (UID: \"92aecc12-4568-4b69-bda9-58101a2b6083\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dxprm" Nov 25 10:23:44 crc kubenswrapper[4769]: I1125 10:23:44.008346 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/92aecc12-4568-4b69-bda9-58101a2b6083-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-dxprm\" (UID: \"92aecc12-4568-4b69-bda9-58101a2b6083\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dxprm" Nov 25 10:23:44 crc kubenswrapper[4769]: I1125 10:23:44.008416 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/92aecc12-4568-4b69-bda9-58101a2b6083-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-dxprm\" (UID: \"92aecc12-4568-4b69-bda9-58101a2b6083\") " 
pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dxprm" Nov 25 10:23:44 crc kubenswrapper[4769]: I1125 10:23:44.008498 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/92aecc12-4568-4b69-bda9-58101a2b6083-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-dxprm\" (UID: \"92aecc12-4568-4b69-bda9-58101a2b6083\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dxprm" Nov 25 10:23:44 crc kubenswrapper[4769]: I1125 10:23:44.009118 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mt6st\" (UniqueName: \"kubernetes.io/projected/92aecc12-4568-4b69-bda9-58101a2b6083-kube-api-access-mt6st\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-dxprm\" (UID: \"92aecc12-4568-4b69-bda9-58101a2b6083\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dxprm" Nov 25 10:23:44 crc kubenswrapper[4769]: I1125 10:23:44.111589 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/92aecc12-4568-4b69-bda9-58101a2b6083-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-dxprm\" (UID: \"92aecc12-4568-4b69-bda9-58101a2b6083\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dxprm" Nov 25 10:23:44 crc kubenswrapper[4769]: I1125 10:23:44.111693 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/92aecc12-4568-4b69-bda9-58101a2b6083-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-dxprm\" (UID: \"92aecc12-4568-4b69-bda9-58101a2b6083\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dxprm" Nov 25 10:23:44 crc kubenswrapper[4769]: I1125 10:23:44.111751 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/92aecc12-4568-4b69-bda9-58101a2b6083-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-dxprm\" (UID: \"92aecc12-4568-4b69-bda9-58101a2b6083\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dxprm" Nov 25 10:23:44 crc kubenswrapper[4769]: I1125 10:23:44.111838 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/92aecc12-4568-4b69-bda9-58101a2b6083-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-dxprm\" (UID: \"92aecc12-4568-4b69-bda9-58101a2b6083\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dxprm" Nov 25 10:23:44 crc kubenswrapper[4769]: I1125 10:23:44.111943 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mt6st\" (UniqueName: \"kubernetes.io/projected/92aecc12-4568-4b69-bda9-58101a2b6083-kube-api-access-mt6st\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-dxprm\" (UID: \"92aecc12-4568-4b69-bda9-58101a2b6083\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dxprm" Nov 25 10:23:44 crc kubenswrapper[4769]: I1125 10:23:44.112793 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/92aecc12-4568-4b69-bda9-58101a2b6083-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-dxprm\" (UID: \"92aecc12-4568-4b69-bda9-58101a2b6083\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dxprm" Nov 25 10:23:44 crc kubenswrapper[4769]: I1125 
10:23:44.117768 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/92aecc12-4568-4b69-bda9-58101a2b6083-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-dxprm\" (UID: \"92aecc12-4568-4b69-bda9-58101a2b6083\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dxprm" Nov 25 10:23:44 crc kubenswrapper[4769]: I1125 10:23:44.119540 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/92aecc12-4568-4b69-bda9-58101a2b6083-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-dxprm\" (UID: \"92aecc12-4568-4b69-bda9-58101a2b6083\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dxprm" Nov 25 10:23:44 crc kubenswrapper[4769]: I1125 10:23:44.128112 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/92aecc12-4568-4b69-bda9-58101a2b6083-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-dxprm\" (UID: \"92aecc12-4568-4b69-bda9-58101a2b6083\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dxprm" Nov 25 10:23:44 crc kubenswrapper[4769]: I1125 10:23:44.128115 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mt6st\" (UniqueName: \"kubernetes.io/projected/92aecc12-4568-4b69-bda9-58101a2b6083-kube-api-access-mt6st\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-dxprm\" (UID: \"92aecc12-4568-4b69-bda9-58101a2b6083\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dxprm" Nov 25 10:23:44 crc kubenswrapper[4769]: I1125 10:23:44.209924 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dxprm" Nov 25 10:23:44 crc kubenswrapper[4769]: I1125 10:23:44.749113 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-d75lq" podUID="94a5185d-3f16-48ba-91ee-97c4f153ad27" containerName="registry-server" containerID="cri-o://c108e1c42ea601bb07963a47a0275f5fce91b35ed12f80c030e4890dadb03c46" gracePeriod=2 Nov 25 10:23:44 crc kubenswrapper[4769]: I1125 10:23:44.833991 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-dxprm"] Nov 25 10:23:45 crc kubenswrapper[4769]: I1125 10:23:45.151471 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d75lq" Nov 25 10:23:45 crc kubenswrapper[4769]: I1125 10:23:45.239056 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lstcv\" (UniqueName: \"kubernetes.io/projected/94a5185d-3f16-48ba-91ee-97c4f153ad27-kube-api-access-lstcv\") pod \"94a5185d-3f16-48ba-91ee-97c4f153ad27\" (UID: \"94a5185d-3f16-48ba-91ee-97c4f153ad27\") " Nov 25 10:23:45 crc kubenswrapper[4769]: I1125 10:23:45.239320 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94a5185d-3f16-48ba-91ee-97c4f153ad27-utilities\") pod \"94a5185d-3f16-48ba-91ee-97c4f153ad27\" (UID: \"94a5185d-3f16-48ba-91ee-97c4f153ad27\") " Nov 25 10:23:45 crc kubenswrapper[4769]: I1125 10:23:45.239367 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94a5185d-3f16-48ba-91ee-97c4f153ad27-catalog-content\") pod \"94a5185d-3f16-48ba-91ee-97c4f153ad27\" (UID: \"94a5185d-3f16-48ba-91ee-97c4f153ad27\") " Nov 25 10:23:45 crc kubenswrapper[4769]: I1125 10:23:45.240203 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/94a5185d-3f16-48ba-91ee-97c4f153ad27-utilities" (OuterVolumeSpecName: "utilities") pod "94a5185d-3f16-48ba-91ee-97c4f153ad27" (UID: "94a5185d-3f16-48ba-91ee-97c4f153ad27"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:23:45 crc kubenswrapper[4769]: I1125 10:23:45.246645 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/94a5185d-3f16-48ba-91ee-97c4f153ad27-kube-api-access-lstcv" (OuterVolumeSpecName: "kube-api-access-lstcv") pod "94a5185d-3f16-48ba-91ee-97c4f153ad27" (UID: "94a5185d-3f16-48ba-91ee-97c4f153ad27"). InnerVolumeSpecName "kube-api-access-lstcv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:23:45 crc kubenswrapper[4769]: I1125 10:23:45.260706 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/94a5185d-3f16-48ba-91ee-97c4f153ad27-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "94a5185d-3f16-48ba-91ee-97c4f153ad27" (UID: "94a5185d-3f16-48ba-91ee-97c4f153ad27"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:23:45 crc kubenswrapper[4769]: I1125 10:23:45.346841 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94a5185d-3f16-48ba-91ee-97c4f153ad27-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:23:45 crc kubenswrapper[4769]: I1125 10:23:45.346882 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lstcv\" (UniqueName: \"kubernetes.io/projected/94a5185d-3f16-48ba-91ee-97c4f153ad27-kube-api-access-lstcv\") on node \"crc\" DevicePath \"\"" Nov 25 10:23:45 crc kubenswrapper[4769]: I1125 10:23:45.346900 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94a5185d-3f16-48ba-91ee-97c4f153ad27-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:23:45 crc kubenswrapper[4769]: I1125 10:23:45.767055 4769 generic.go:334] "Generic (PLEG): container finished" podID="94a5185d-3f16-48ba-91ee-97c4f153ad27" containerID="c108e1c42ea601bb07963a47a0275f5fce91b35ed12f80c030e4890dadb03c46" exitCode=0 Nov 25 10:23:45 crc kubenswrapper[4769]: I1125 10:23:45.767324 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-d75lq" Nov 25 10:23:45 crc kubenswrapper[4769]: I1125 10:23:45.767250 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d75lq" event={"ID":"94a5185d-3f16-48ba-91ee-97c4f153ad27","Type":"ContainerDied","Data":"c108e1c42ea601bb07963a47a0275f5fce91b35ed12f80c030e4890dadb03c46"} Nov 25 10:23:45 crc kubenswrapper[4769]: I1125 10:23:45.767389 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-d75lq" event={"ID":"94a5185d-3f16-48ba-91ee-97c4f153ad27","Type":"ContainerDied","Data":"506b95bdb8297ce2b6e86f202e86a6d43acf8e7ff05db6732310fdb793f643a3"} Nov 25 10:23:45 crc kubenswrapper[4769]: I1125 10:23:45.767411 4769 scope.go:117] "RemoveContainer" containerID="c108e1c42ea601bb07963a47a0275f5fce91b35ed12f80c030e4890dadb03c46" Nov 25 10:23:45 crc kubenswrapper[4769]: I1125 10:23:45.772348 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dxprm" event={"ID":"92aecc12-4568-4b69-bda9-58101a2b6083","Type":"ContainerStarted","Data":"def7adbedc07aab78bc5732d4027321d16615d3f91f3cb70ad99638a39e503d3"} Nov 25 10:23:45 crc kubenswrapper[4769]: I1125 10:23:45.819627 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-d75lq"] Nov 25 10:23:45 crc kubenswrapper[4769]: I1125 10:23:45.822114 4769 scope.go:117] "RemoveContainer" containerID="70c6d860c0b914b1119f736a36a4e598022228dca5b58794011e3c5eaf47d93a" Nov 25 10:23:45 crc kubenswrapper[4769]: I1125 10:23:45.829202 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-d75lq"] Nov 25 10:23:45 crc kubenswrapper[4769]: I1125 10:23:45.849592 4769 scope.go:117] "RemoveContainer" containerID="50730af583309cd266928cf05422d54ae29a164bb39fbec596d6bc696fa72910" Nov 25 10:23:45 crc kubenswrapper[4769]: I1125 10:23:45.869631 4769 scope.go:117] "RemoveContainer" containerID="c108e1c42ea601bb07963a47a0275f5fce91b35ed12f80c030e4890dadb03c46" Nov 25 10:23:45 crc kubenswrapper[4769]: E1125 10:23:45.870137 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"c108e1c42ea601bb07963a47a0275f5fce91b35ed12f80c030e4890dadb03c46\": container with ID starting with c108e1c42ea601bb07963a47a0275f5fce91b35ed12f80c030e4890dadb03c46 not found: ID does not exist" containerID="c108e1c42ea601bb07963a47a0275f5fce91b35ed12f80c030e4890dadb03c46" Nov 25 10:23:45 crc kubenswrapper[4769]: I1125 10:23:45.870171 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c108e1c42ea601bb07963a47a0275f5fce91b35ed12f80c030e4890dadb03c46"} err="failed to get container status \"c108e1c42ea601bb07963a47a0275f5fce91b35ed12f80c030e4890dadb03c46\": rpc error: code = NotFound desc = could not find container \"c108e1c42ea601bb07963a47a0275f5fce91b35ed12f80c030e4890dadb03c46\": container with ID starting with c108e1c42ea601bb07963a47a0275f5fce91b35ed12f80c030e4890dadb03c46 not found: ID does not exist" Nov 25 10:23:45 crc kubenswrapper[4769]: I1125 10:23:45.870194 4769 scope.go:117] "RemoveContainer" containerID="70c6d860c0b914b1119f736a36a4e598022228dca5b58794011e3c5eaf47d93a" Nov 25 10:23:45 crc kubenswrapper[4769]: E1125 10:23:45.870517 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"70c6d860c0b914b1119f736a36a4e598022228dca5b58794011e3c5eaf47d93a\": container with ID starting with 70c6d860c0b914b1119f736a36a4e598022228dca5b58794011e3c5eaf47d93a not found: ID does not exist" containerID="70c6d860c0b914b1119f736a36a4e598022228dca5b58794011e3c5eaf47d93a" Nov 25 10:23:45 crc kubenswrapper[4769]: I1125 10:23:45.870547 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70c6d860c0b914b1119f736a36a4e598022228dca5b58794011e3c5eaf47d93a"} err="failed to get container status \"70c6d860c0b914b1119f736a36a4e598022228dca5b58794011e3c5eaf47d93a\": rpc error: code = NotFound desc = could not find container \"70c6d860c0b914b1119f736a36a4e598022228dca5b58794011e3c5eaf47d93a\": container with ID starting with 70c6d860c0b914b1119f736a36a4e598022228dca5b58794011e3c5eaf47d93a not found: ID does not exist" Nov 25 10:23:45 crc kubenswrapper[4769]: I1125 10:23:45.870563 4769 scope.go:117] "RemoveContainer" containerID="50730af583309cd266928cf05422d54ae29a164bb39fbec596d6bc696fa72910" Nov 25 10:23:45 crc kubenswrapper[4769]: E1125 10:23:45.870765 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"50730af583309cd266928cf05422d54ae29a164bb39fbec596d6bc696fa72910\": container with ID starting with 50730af583309cd266928cf05422d54ae29a164bb39fbec596d6bc696fa72910 not found: ID does not exist" containerID="50730af583309cd266928cf05422d54ae29a164bb39fbec596d6bc696fa72910" Nov 25 10:23:45 crc kubenswrapper[4769]: I1125 10:23:45.870786 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"50730af583309cd266928cf05422d54ae29a164bb39fbec596d6bc696fa72910"} err="failed to get container status \"50730af583309cd266928cf05422d54ae29a164bb39fbec596d6bc696fa72910\": rpc error: code = NotFound desc = could not find container \"50730af583309cd266928cf05422d54ae29a164bb39fbec596d6bc696fa72910\": container with ID starting with 50730af583309cd266928cf05422d54ae29a164bb39fbec596d6bc696fa72910 not found: ID does not exist" Nov 25 10:23:46 crc kubenswrapper[4769]: I1125 10:23:46.249591 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="94a5185d-3f16-48ba-91ee-97c4f153ad27" 
path="/var/lib/kubelet/pods/94a5185d-3f16-48ba-91ee-97c4f153ad27/volumes" Nov 25 10:23:46 crc kubenswrapper[4769]: I1125 10:23:46.785154 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dxprm" event={"ID":"92aecc12-4568-4b69-bda9-58101a2b6083","Type":"ContainerStarted","Data":"3fe39253951beacf278a383373e091d9fe0752eeb448ba7c8dfc08426830f4ef"} Nov 25 10:23:46 crc kubenswrapper[4769]: I1125 10:23:46.815713 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dxprm" podStartSLOduration=3.202908817 podStartE2EDuration="3.815689806s" podCreationTimestamp="2025-11-25 10:23:43 +0000 UTC" firstStartedPulling="2025-11-25 10:23:44.870284412 +0000 UTC m=+2373.455256725" lastFinishedPulling="2025-11-25 10:23:45.483065411 +0000 UTC m=+2374.068037714" observedRunningTime="2025-11-25 10:23:46.807421615 +0000 UTC m=+2375.392393928" watchObservedRunningTime="2025-11-25 10:23:46.815689806 +0000 UTC m=+2375.400662129" Nov 25 10:23:54 crc kubenswrapper[4769]: I1125 10:23:54.239943 4769 scope.go:117] "RemoveContainer" containerID="c0a1ea65b15cd591f5bac79049d2ab64564dd103d3a256253ad54595a631ef3b" Nov 25 10:23:54 crc kubenswrapper[4769]: E1125 10:23:54.240915 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:24:05 crc kubenswrapper[4769]: I1125 10:24:05.236975 4769 scope.go:117] "RemoveContainer" containerID="c0a1ea65b15cd591f5bac79049d2ab64564dd103d3a256253ad54595a631ef3b" Nov 25 10:24:05 crc kubenswrapper[4769]: E1125 10:24:05.237782 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:24:16 crc kubenswrapper[4769]: I1125 10:24:16.238575 4769 scope.go:117] "RemoveContainer" containerID="c0a1ea65b15cd591f5bac79049d2ab64564dd103d3a256253ad54595a631ef3b" Nov 25 10:24:16 crc kubenswrapper[4769]: E1125 10:24:16.239586 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:24:30 crc kubenswrapper[4769]: I1125 10:24:30.238581 4769 scope.go:117] "RemoveContainer" containerID="c0a1ea65b15cd591f5bac79049d2ab64564dd103d3a256253ad54595a631ef3b" Nov 25 10:24:30 crc kubenswrapper[4769]: E1125 10:24:30.242023 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
Nov 25 10:24:30 crc kubenswrapper[4769]: E1125 10:24:30.242023 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256"
Nov 25 10:24:41 crc kubenswrapper[4769]: I1125 10:24:41.237605 4769 scope.go:117] "RemoveContainer" containerID="c0a1ea65b15cd591f5bac79049d2ab64564dd103d3a256253ad54595a631ef3b"
Nov 25 10:24:41 crc kubenswrapper[4769]: E1125 10:24:41.238749 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256"
Nov 25 10:24:50 crc kubenswrapper[4769]: I1125 10:24:50.628448 4769 generic.go:334] "Generic (PLEG): container finished" podID="92aecc12-4568-4b69-bda9-58101a2b6083" containerID="3fe39253951beacf278a383373e091d9fe0752eeb448ba7c8dfc08426830f4ef" exitCode=0
Nov 25 10:24:50 crc kubenswrapper[4769]: I1125 10:24:50.628533 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dxprm" event={"ID":"92aecc12-4568-4b69-bda9-58101a2b6083","Type":"ContainerDied","Data":"3fe39253951beacf278a383373e091d9fe0752eeb448ba7c8dfc08426830f4ef"}
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.254493 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dxprm"
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.356425 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/92aecc12-4568-4b69-bda9-58101a2b6083-ovn-combined-ca-bundle\") pod \"92aecc12-4568-4b69-bda9-58101a2b6083\" (UID: \"92aecc12-4568-4b69-bda9-58101a2b6083\") "
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.356797 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mt6st\" (UniqueName: \"kubernetes.io/projected/92aecc12-4568-4b69-bda9-58101a2b6083-kube-api-access-mt6st\") pod \"92aecc12-4568-4b69-bda9-58101a2b6083\" (UID: \"92aecc12-4568-4b69-bda9-58101a2b6083\") "
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.356892 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/92aecc12-4568-4b69-bda9-58101a2b6083-inventory\") pod \"92aecc12-4568-4b69-bda9-58101a2b6083\" (UID: \"92aecc12-4568-4b69-bda9-58101a2b6083\") "
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.356956 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/92aecc12-4568-4b69-bda9-58101a2b6083-ovncontroller-config-0\") pod \"92aecc12-4568-4b69-bda9-58101a2b6083\" (UID: \"92aecc12-4568-4b69-bda9-58101a2b6083\") "
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.357910 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/92aecc12-4568-4b69-bda9-58101a2b6083-ssh-key\") pod \"92aecc12-4568-4b69-bda9-58101a2b6083\" (UID: \"92aecc12-4568-4b69-bda9-58101a2b6083\") "
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.373200 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/92aecc12-4568-4b69-bda9-58101a2b6083-kube-api-access-mt6st" (OuterVolumeSpecName: "kube-api-access-mt6st") pod "92aecc12-4568-4b69-bda9-58101a2b6083" (UID: "92aecc12-4568-4b69-bda9-58101a2b6083"). InnerVolumeSpecName "kube-api-access-mt6st". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.374452 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/92aecc12-4568-4b69-bda9-58101a2b6083-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "92aecc12-4568-4b69-bda9-58101a2b6083" (UID: "92aecc12-4568-4b69-bda9-58101a2b6083"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.386333 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/92aecc12-4568-4b69-bda9-58101a2b6083-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "92aecc12-4568-4b69-bda9-58101a2b6083" (UID: "92aecc12-4568-4b69-bda9-58101a2b6083"). InnerVolumeSpecName "ovncontroller-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.389074 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/92aecc12-4568-4b69-bda9-58101a2b6083-inventory" (OuterVolumeSpecName: "inventory") pod "92aecc12-4568-4b69-bda9-58101a2b6083" (UID: "92aecc12-4568-4b69-bda9-58101a2b6083"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.391506 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/92aecc12-4568-4b69-bda9-58101a2b6083-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "92aecc12-4568-4b69-bda9-58101a2b6083" (UID: "92aecc12-4568-4b69-bda9-58101a2b6083"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.461566 4769 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/92aecc12-4568-4b69-bda9-58101a2b6083-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.461625 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mt6st\" (UniqueName: \"kubernetes.io/projected/92aecc12-4568-4b69-bda9-58101a2b6083-kube-api-access-mt6st\") on node \"crc\" DevicePath \"\""
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.461640 4769 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/92aecc12-4568-4b69-bda9-58101a2b6083-inventory\") on node \"crc\" DevicePath \"\""
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.461653 4769 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/92aecc12-4568-4b69-bda9-58101a2b6083-ovncontroller-config-0\") on node \"crc\" DevicePath \"\""
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.461664 4769 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/92aecc12-4568-4b69-bda9-58101a2b6083-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.664582 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dxprm" event={"ID":"92aecc12-4568-4b69-bda9-58101a2b6083","Type":"ContainerDied","Data":"def7adbedc07aab78bc5732d4027321d16615d3f91f3cb70ad99638a39e503d3"}
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.664632 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="def7adbedc07aab78bc5732d4027321d16615d3f91f3cb70ad99638a39e503d3"
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.664676 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dxprm"
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.869231 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75"]
Nov 25 10:24:52 crc kubenswrapper[4769]: E1125 10:24:52.870706 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92aecc12-4568-4b69-bda9-58101a2b6083" containerName="ovn-edpm-deployment-openstack-edpm-ipam"
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.870730 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="92aecc12-4568-4b69-bda9-58101a2b6083" containerName="ovn-edpm-deployment-openstack-edpm-ipam"
Nov 25 10:24:52 crc kubenswrapper[4769]: E1125 10:24:52.870777 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94a5185d-3f16-48ba-91ee-97c4f153ad27" containerName="extract-content"
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.870786 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="94a5185d-3f16-48ba-91ee-97c4f153ad27" containerName="extract-content"
Nov 25 10:24:52 crc kubenswrapper[4769]: E1125 10:24:52.870815 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94a5185d-3f16-48ba-91ee-97c4f153ad27" containerName="extract-utilities"
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.870823 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="94a5185d-3f16-48ba-91ee-97c4f153ad27" containerName="extract-utilities"
Nov 25 10:24:52 crc kubenswrapper[4769]: E1125 10:24:52.870899 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94a5185d-3f16-48ba-91ee-97c4f153ad27" containerName="registry-server"
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.870909 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="94a5185d-3f16-48ba-91ee-97c4f153ad27" containerName="registry-server"
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.871556 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="92aecc12-4568-4b69-bda9-58101a2b6083" containerName="ovn-edpm-deployment-openstack-edpm-ipam"
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.871611 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="94a5185d-3f16-48ba-91ee-97c4f153ad27" containerName="registry-server"
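Before admitting the new pod, the CPU and memory managers drop per-container state left behind by the deleted pods, which is what the RemoveStaleState and "Deleted CPUSet assignment" lines above record. A minimal sketch of that prune under assumed toy types (illustrative, not kubelet's state format):

package main

import "fmt"

// removeStaleState prunes per-container assignments (keyed by podUID) for
// pods that are no longer active, mirroring the bookkeeping logged above.
func removeStaleState(assignments map[string]map[string]string, active map[string]bool) {
	for podUID, containers := range assignments {
		if active[podUID] {
			continue
		}
		for name := range containers {
			fmt.Printf("RemoveStaleState: removing container podUID=%q containerName=%q\n", podUID, name)
		}
		delete(assignments, podUID) // deleting during range is safe in Go
	}
}

func main() {
	state := map[string]map[string]string{
		"92aecc12-4568-4b69-bda9-58101a2b6083": {"ovn-edpm-deployment-openstack-edpm-ipam": "cpuset (toy value)"},
	}
	removeStaleState(state, map[string]bool{}) // the pod was deleted, so its state goes
}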
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.873260 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75"
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.875461 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-skqtv"
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.876217 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.876264 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.876462 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config"
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.876657 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.877300 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config"
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.879044 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/84dc9d02-ba7e-4b1e-8b1b-24654611c9ba-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75\" (UID: \"84dc9d02-ba7e-4b1e-8b1b-24654611c9ba\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75"
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.879154 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/84dc9d02-ba7e-4b1e-8b1b-24654611c9ba-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75\" (UID: \"84dc9d02-ba7e-4b1e-8b1b-24654611c9ba\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75"
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.879185 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/84dc9d02-ba7e-4b1e-8b1b-24654611c9ba-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75\" (UID: \"84dc9d02-ba7e-4b1e-8b1b-24654611c9ba\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75"
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.879410 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/84dc9d02-ba7e-4b1e-8b1b-24654611c9ba-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75\" (UID: \"84dc9d02-ba7e-4b1e-8b1b-24654611c9ba\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75"
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.879457 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84dc9d02-ba7e-4b1e-8b1b-24654611c9ba-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75\" (UID: \"84dc9d02-ba7e-4b1e-8b1b-24654611c9ba\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75"
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.879543 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xzzbj\" (UniqueName: \"kubernetes.io/projected/84dc9d02-ba7e-4b1e-8b1b-24654611c9ba-kube-api-access-xzzbj\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75\" (UID: \"84dc9d02-ba7e-4b1e-8b1b-24654611c9ba\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75"
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.886206 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75"]
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.983494 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/84dc9d02-ba7e-4b1e-8b1b-24654611c9ba-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75\" (UID: \"84dc9d02-ba7e-4b1e-8b1b-24654611c9ba\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75"
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.983631 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84dc9d02-ba7e-4b1e-8b1b-24654611c9ba-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75\" (UID: \"84dc9d02-ba7e-4b1e-8b1b-24654611c9ba\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75"
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.983724 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xzzbj\" (UniqueName: \"kubernetes.io/projected/84dc9d02-ba7e-4b1e-8b1b-24654611c9ba-kube-api-access-xzzbj\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75\" (UID: \"84dc9d02-ba7e-4b1e-8b1b-24654611c9ba\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75"
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.983904 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/84dc9d02-ba7e-4b1e-8b1b-24654611c9ba-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75\" (UID: \"84dc9d02-ba7e-4b1e-8b1b-24654611c9ba\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75"
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.984016 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/84dc9d02-ba7e-4b1e-8b1b-24654611c9ba-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75\" (UID: \"84dc9d02-ba7e-4b1e-8b1b-24654611c9ba\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75"
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.984057 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/84dc9d02-ba7e-4b1e-8b1b-24654611c9ba-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75\" (UID: \"84dc9d02-ba7e-4b1e-8b1b-24654611c9ba\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75"
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.994441 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/84dc9d02-ba7e-4b1e-8b1b-24654611c9ba-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75\" (UID: \"84dc9d02-ba7e-4b1e-8b1b-24654611c9ba\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75"
Nov 25 10:24:52 crc kubenswrapper[4769]: I1125 10:24:52.994592 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84dc9d02-ba7e-4b1e-8b1b-24654611c9ba-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75\" (UID: \"84dc9d02-ba7e-4b1e-8b1b-24654611c9ba\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75"
Nov 25 10:24:53 crc kubenswrapper[4769]: I1125 10:24:53.003681 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/84dc9d02-ba7e-4b1e-8b1b-24654611c9ba-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75\" (UID: \"84dc9d02-ba7e-4b1e-8b1b-24654611c9ba\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75"
Nov 25 10:24:53 crc kubenswrapper[4769]: I1125 10:24:53.004396 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/84dc9d02-ba7e-4b1e-8b1b-24654611c9ba-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75\" (UID: \"84dc9d02-ba7e-4b1e-8b1b-24654611c9ba\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75"
Nov 25 10:24:53 crc kubenswrapper[4769]: I1125 10:24:53.005518 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/84dc9d02-ba7e-4b1e-8b1b-24654611c9ba-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75\" (UID: \"84dc9d02-ba7e-4b1e-8b1b-24654611c9ba\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75"
Nov 25 10:24:53 crc kubenswrapper[4769]: I1125 10:24:53.005812 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xzzbj\" (UniqueName: \"kubernetes.io/projected/84dc9d02-ba7e-4b1e-8b1b-24654611c9ba-kube-api-access-xzzbj\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75\" (UID: \"84dc9d02-ba7e-4b1e-8b1b-24654611c9ba\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75"
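Each "Caches populated" line above is client-go's reflector finishing an initial List+Watch for a secret or configmap the new pod mounts; only then do the mount operations proceed. A sketch of the same shared-informer pattern against a fake clientset (assuming the k8s.io/client-go packages; illustrative wiring, not kubelet's exact setup):

package main

import (
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// A fake clientset seeded with one of the secrets named above.
	client := fake.NewSimpleClientset(&corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: "nova-metadata-neutron-config", Namespace: "openstack"},
	})

	factory := informers.NewSharedInformerFactory(client, time.Minute)
	secrets := factory.Core().V1().Secrets().Informer()

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)

	// This is the point the reflector logs as "Caches populated".
	cache.WaitForCacheSync(stop, secrets.HasSynced)
	fmt.Println("secret cache synced")
}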
Nov 25 10:24:53 crc kubenswrapper[4769]: I1125 10:24:53.209933 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75"
Nov 25 10:24:53 crc kubenswrapper[4769]: I1125 10:24:53.824661 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75"]
Nov 25 10:24:54 crc kubenswrapper[4769]: I1125 10:24:54.692254 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75" event={"ID":"84dc9d02-ba7e-4b1e-8b1b-24654611c9ba","Type":"ContainerStarted","Data":"e65f7b4afaf284c2e061814ecc43c203c603454bdccfcb4aa6c58b0031084997"}
Nov 25 10:24:54 crc kubenswrapper[4769]: I1125 10:24:54.693219 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75" event={"ID":"84dc9d02-ba7e-4b1e-8b1b-24654611c9ba","Type":"ContainerStarted","Data":"11b1e0bc3625ff41c9aa26a7342db828e90327db0e58359318b94a9c43a31757"}
Nov 25 10:24:54 crc kubenswrapper[4769]: I1125 10:24:54.726821 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75" podStartSLOduration=2.259474752 podStartE2EDuration="2.726792457s" podCreationTimestamp="2025-11-25 10:24:52 +0000 UTC" firstStartedPulling="2025-11-25 10:24:53.837890168 +0000 UTC m=+2442.422862491" lastFinishedPulling="2025-11-25 10:24:54.305207883 +0000 UTC m=+2442.890180196" observedRunningTime="2025-11-25 10:24:54.713541004 +0000 UTC m=+2443.298513317" watchObservedRunningTime="2025-11-25 10:24:54.726792457 +0000 UTC m=+2443.311764800"
Nov 25 10:24:56 crc kubenswrapper[4769]: I1125 10:24:56.238062 4769 scope.go:117] "RemoveContainer" containerID="c0a1ea65b15cd591f5bac79049d2ab64564dd103d3a256253ad54595a631ef3b"
Nov 25 10:24:56 crc kubenswrapper[4769]: E1125 10:24:56.238846 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256"
Nov 25 10:25:07 crc kubenswrapper[4769]: I1125 10:25:07.237445 4769 scope.go:117] "RemoveContainer" containerID="c0a1ea65b15cd591f5bac79049d2ab64564dd103d3a256253ad54595a631ef3b"
Nov 25 10:25:07 crc kubenswrapper[4769]: E1125 10:25:07.239177 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256"
Nov 25 10:25:18 crc kubenswrapper[4769]: I1125 10:25:18.238373 4769 scope.go:117] "RemoveContainer" containerID="c0a1ea65b15cd591f5bac79049d2ab64564dd103d3a256253ad54595a631ef3b"
Nov 25 10:25:18 crc kubenswrapper[4769]: E1125 10:25:18.239401 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256"
Nov 25 10:25:29 crc kubenswrapper[4769]: I1125 10:25:29.237609 4769 scope.go:117] "RemoveContainer" containerID="c0a1ea65b15cd591f5bac79049d2ab64564dd103d3a256253ad54595a631ef3b"
Nov 25 10:25:29 crc kubenswrapper[4769]: E1125 10:25:29.238585 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256"
Nov 25 10:25:43 crc kubenswrapper[4769]: I1125 10:25:43.238649 4769 scope.go:117] "RemoveContainer" containerID="c0a1ea65b15cd591f5bac79049d2ab64564dd103d3a256253ad54595a631ef3b"
Nov 25 10:25:43 crc kubenswrapper[4769]: E1125 10:25:43.240298 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256"
Nov 25 10:25:44 crc kubenswrapper[4769]: I1125 10:25:44.310215 4769 generic.go:334] "Generic (PLEG): container finished" podID="84dc9d02-ba7e-4b1e-8b1b-24654611c9ba" containerID="e65f7b4afaf284c2e061814ecc43c203c603454bdccfcb4aa6c58b0031084997" exitCode=0
Nov 25 10:25:44 crc kubenswrapper[4769]: I1125 10:25:44.310334 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75" event={"ID":"84dc9d02-ba7e-4b1e-8b1b-24654611c9ba","Type":"ContainerDied","Data":"e65f7b4afaf284c2e061814ecc43c203c603454bdccfcb4aa6c58b0031084997"}
Nov 25 10:25:45 crc kubenswrapper[4769]: I1125 10:25:45.973501 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75"
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.086192 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/84dc9d02-ba7e-4b1e-8b1b-24654611c9ba-inventory\") pod \"84dc9d02-ba7e-4b1e-8b1b-24654611c9ba\" (UID: \"84dc9d02-ba7e-4b1e-8b1b-24654611c9ba\") "
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.086355 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xzzbj\" (UniqueName: \"kubernetes.io/projected/84dc9d02-ba7e-4b1e-8b1b-24654611c9ba-kube-api-access-xzzbj\") pod \"84dc9d02-ba7e-4b1e-8b1b-24654611c9ba\" (UID: \"84dc9d02-ba7e-4b1e-8b1b-24654611c9ba\") "
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.086585 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84dc9d02-ba7e-4b1e-8b1b-24654611c9ba-neutron-metadata-combined-ca-bundle\") pod \"84dc9d02-ba7e-4b1e-8b1b-24654611c9ba\" (UID: \"84dc9d02-ba7e-4b1e-8b1b-24654611c9ba\") "
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.086790 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/84dc9d02-ba7e-4b1e-8b1b-24654611c9ba-ssh-key\") pod \"84dc9d02-ba7e-4b1e-8b1b-24654611c9ba\" (UID: \"84dc9d02-ba7e-4b1e-8b1b-24654611c9ba\") "
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.087692 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/84dc9d02-ba7e-4b1e-8b1b-24654611c9ba-nova-metadata-neutron-config-0\") pod \"84dc9d02-ba7e-4b1e-8b1b-24654611c9ba\" (UID: \"84dc9d02-ba7e-4b1e-8b1b-24654611c9ba\") "
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.087986 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/84dc9d02-ba7e-4b1e-8b1b-24654611c9ba-neutron-ovn-metadata-agent-neutron-config-0\") pod \"84dc9d02-ba7e-4b1e-8b1b-24654611c9ba\" (UID: \"84dc9d02-ba7e-4b1e-8b1b-24654611c9ba\") "
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.094877 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/84dc9d02-ba7e-4b1e-8b1b-24654611c9ba-kube-api-access-xzzbj" (OuterVolumeSpecName: "kube-api-access-xzzbj") pod "84dc9d02-ba7e-4b1e-8b1b-24654611c9ba" (UID: "84dc9d02-ba7e-4b1e-8b1b-24654611c9ba"). InnerVolumeSpecName "kube-api-access-xzzbj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.097445 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84dc9d02-ba7e-4b1e-8b1b-24654611c9ba-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "84dc9d02-ba7e-4b1e-8b1b-24654611c9ba" (UID: "84dc9d02-ba7e-4b1e-8b1b-24654611c9ba"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.124181 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84dc9d02-ba7e-4b1e-8b1b-24654611c9ba-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "84dc9d02-ba7e-4b1e-8b1b-24654611c9ba" (UID: "84dc9d02-ba7e-4b1e-8b1b-24654611c9ba"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.147176 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84dc9d02-ba7e-4b1e-8b1b-24654611c9ba-inventory" (OuterVolumeSpecName: "inventory") pod "84dc9d02-ba7e-4b1e-8b1b-24654611c9ba" (UID: "84dc9d02-ba7e-4b1e-8b1b-24654611c9ba"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.147339 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84dc9d02-ba7e-4b1e-8b1b-24654611c9ba-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "84dc9d02-ba7e-4b1e-8b1b-24654611c9ba" (UID: "84dc9d02-ba7e-4b1e-8b1b-24654611c9ba"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.156658 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84dc9d02-ba7e-4b1e-8b1b-24654611c9ba-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "84dc9d02-ba7e-4b1e-8b1b-24654611c9ba" (UID: "84dc9d02-ba7e-4b1e-8b1b-24654611c9ba"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.192295 4769 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/84dc9d02-ba7e-4b1e-8b1b-24654611c9ba-inventory\") on node \"crc\" DevicePath \"\""
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.192338 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xzzbj\" (UniqueName: \"kubernetes.io/projected/84dc9d02-ba7e-4b1e-8b1b-24654611c9ba-kube-api-access-xzzbj\") on node \"crc\" DevicePath \"\""
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.192353 4769 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84dc9d02-ba7e-4b1e-8b1b-24654611c9ba-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.192364 4769 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/84dc9d02-ba7e-4b1e-8b1b-24654611c9ba-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.192373 4769 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/84dc9d02-ba7e-4b1e-8b1b-24654611c9ba-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\""
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.192387 4769 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/84dc9d02-ba7e-4b1e-8b1b-24654611c9ba-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\""
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.335709 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75" event={"ID":"84dc9d02-ba7e-4b1e-8b1b-24654611c9ba","Type":"ContainerDied","Data":"11b1e0bc3625ff41c9aa26a7342db828e90327db0e58359318b94a9c43a31757"}
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.335770 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="11b1e0bc3625ff41c9aa26a7342db828e90327db0e58359318b94a9c43a31757"
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.335841 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75"
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.442114 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k57k"]
Nov 25 10:25:46 crc kubenswrapper[4769]: E1125 10:25:46.442690 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84dc9d02-ba7e-4b1e-8b1b-24654611c9ba" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam"
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.442710 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="84dc9d02-ba7e-4b1e-8b1b-24654611c9ba" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam"
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.442975 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="84dc9d02-ba7e-4b1e-8b1b-24654611c9ba" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam"
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.443887 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k57k"
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.446568 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.451778 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.451798 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-skqtv"
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.451990 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.452180 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret"
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.452817 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k57k"]
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.604251 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d2d8eea-8aae-4e92-8d69-630eec1d95b9-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7k57k\" (UID: \"1d2d8eea-8aae-4e92-8d69-630eec1d95b9\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k57k"
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.604613 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1d2d8eea-8aae-4e92-8d69-630eec1d95b9-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7k57k\" (UID: \"1d2d8eea-8aae-4e92-8d69-630eec1d95b9\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k57k"
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.604772 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1d2d8eea-8aae-4e92-8d69-630eec1d95b9-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7k57k\" (UID: \"1d2d8eea-8aae-4e92-8d69-630eec1d95b9\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k57k"
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.605101 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/1d2d8eea-8aae-4e92-8d69-630eec1d95b9-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7k57k\" (UID: \"1d2d8eea-8aae-4e92-8d69-630eec1d95b9\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k57k"
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.605411 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cswb7\" (UniqueName: \"kubernetes.io/projected/1d2d8eea-8aae-4e92-8d69-630eec1d95b9-kube-api-access-cswb7\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7k57k\" (UID: \"1d2d8eea-8aae-4e92-8d69-630eec1d95b9\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k57k"
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.708113 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cswb7\" (UniqueName: \"kubernetes.io/projected/1d2d8eea-8aae-4e92-8d69-630eec1d95b9-kube-api-access-cswb7\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7k57k\" (UID: \"1d2d8eea-8aae-4e92-8d69-630eec1d95b9\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k57k"
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.708199 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d2d8eea-8aae-4e92-8d69-630eec1d95b9-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7k57k\" (UID: \"1d2d8eea-8aae-4e92-8d69-630eec1d95b9\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k57k"
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.709217 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1d2d8eea-8aae-4e92-8d69-630eec1d95b9-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7k57k\" (UID: \"1d2d8eea-8aae-4e92-8d69-630eec1d95b9\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k57k"
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.709304 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1d2d8eea-8aae-4e92-8d69-630eec1d95b9-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7k57k\" (UID: \"1d2d8eea-8aae-4e92-8d69-630eec1d95b9\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k57k"
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.709461 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/1d2d8eea-8aae-4e92-8d69-630eec1d95b9-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7k57k\" (UID: \"1d2d8eea-8aae-4e92-8d69-630eec1d95b9\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k57k"
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.714609 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1d2d8eea-8aae-4e92-8d69-630eec1d95b9-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7k57k\" (UID: \"1d2d8eea-8aae-4e92-8d69-630eec1d95b9\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k57k"
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.714787 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/1d2d8eea-8aae-4e92-8d69-630eec1d95b9-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7k57k\" (UID: \"1d2d8eea-8aae-4e92-8d69-630eec1d95b9\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k57k"
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.715132 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1d2d8eea-8aae-4e92-8d69-630eec1d95b9-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7k57k\" (UID: \"1d2d8eea-8aae-4e92-8d69-630eec1d95b9\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k57k"
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.715930 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d2d8eea-8aae-4e92-8d69-630eec1d95b9-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7k57k\" (UID: \"1d2d8eea-8aae-4e92-8d69-630eec1d95b9\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k57k"
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.726727 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cswb7\" (UniqueName: \"kubernetes.io/projected/1d2d8eea-8aae-4e92-8d69-630eec1d95b9-kube-api-access-cswb7\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7k57k\" (UID: \"1d2d8eea-8aae-4e92-8d69-630eec1d95b9\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k57k"
Nov 25 10:25:46 crc kubenswrapper[4769]: I1125 10:25:46.773178 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k57k"
Nov 25 10:25:47 crc kubenswrapper[4769]: I1125 10:25:47.404008 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k57k"]
Nov 25 10:25:48 crc kubenswrapper[4769]: I1125 10:25:48.386504 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k57k" event={"ID":"1d2d8eea-8aae-4e92-8d69-630eec1d95b9","Type":"ContainerStarted","Data":"3ba0fc13a7ac0eac230c7a7ab4d22b70b1d4fb7d0766809787a26faf0d25dce1"}
Nov 25 10:25:48 crc kubenswrapper[4769]: I1125 10:25:48.387099 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k57k" event={"ID":"1d2d8eea-8aae-4e92-8d69-630eec1d95b9","Type":"ContainerStarted","Data":"a9b25f5fbf6bfc8c77e3eea51cadd58f262c46deb369ae5abdb4b81acb001cde"}
Nov 25 10:25:48 crc kubenswrapper[4769]: I1125 10:25:48.410220 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k57k" podStartSLOduration=1.889102293 podStartE2EDuration="2.410201999s" podCreationTimestamp="2025-11-25 10:25:46 +0000 UTC" firstStartedPulling="2025-11-25 10:25:47.413375245 +0000 UTC m=+2495.998347558" lastFinishedPulling="2025-11-25 10:25:47.934474941 +0000 UTC m=+2496.519447264" observedRunningTime="2025-11-25 10:25:48.405167844 +0000 UTC m=+2496.990140177" watchObservedRunningTime="2025-11-25 10:25:48.410201999 +0000 UTC m=+2496.995174312"
Nov 25 10:25:54 crc kubenswrapper[4769]: I1125 10:25:54.237161 4769 scope.go:117] "RemoveContainer" containerID="c0a1ea65b15cd591f5bac79049d2ab64564dd103d3a256253ad54595a631ef3b"
Nov 25 10:25:54 crc kubenswrapper[4769]: E1125 10:25:54.238032 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256"
Nov 25 10:26:08 crc kubenswrapper[4769]: I1125 10:26:08.236782 4769 scope.go:117] "RemoveContainer" containerID="c0a1ea65b15cd591f5bac79049d2ab64564dd103d3a256253ad54595a631ef3b"
pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:26:21 crc kubenswrapper[4769]: I1125 10:26:21.237690 4769 scope.go:117] "RemoveContainer" containerID="c0a1ea65b15cd591f5bac79049d2ab64564dd103d3a256253ad54595a631ef3b" Nov 25 10:26:21 crc kubenswrapper[4769]: E1125 10:26:21.238808 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:26:33 crc kubenswrapper[4769]: I1125 10:26:33.240567 4769 scope.go:117] "RemoveContainer" containerID="c0a1ea65b15cd591f5bac79049d2ab64564dd103d3a256253ad54595a631ef3b" Nov 25 10:26:33 crc kubenswrapper[4769]: E1125 10:26:33.241711 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:26:48 crc kubenswrapper[4769]: I1125 10:26:48.237160 4769 scope.go:117] "RemoveContainer" containerID="c0a1ea65b15cd591f5bac79049d2ab64564dd103d3a256253ad54595a631ef3b" Nov 25 10:26:48 crc kubenswrapper[4769]: E1125 10:26:48.238232 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:27:01 crc kubenswrapper[4769]: I1125 10:27:01.237434 4769 scope.go:117] "RemoveContainer" containerID="c0a1ea65b15cd591f5bac79049d2ab64564dd103d3a256253ad54595a631ef3b" Nov 25 10:27:01 crc kubenswrapper[4769]: E1125 10:27:01.238431 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:27:14 crc kubenswrapper[4769]: I1125 10:27:14.237265 4769 scope.go:117] "RemoveContainer" containerID="c0a1ea65b15cd591f5bac79049d2ab64564dd103d3a256253ad54595a631ef3b" Nov 25 10:27:14 crc kubenswrapper[4769]: E1125 10:27:14.238259 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:27:28 crc kubenswrapper[4769]: I1125 10:27:28.238548 4769 
scope.go:117] "RemoveContainer" containerID="c0a1ea65b15cd591f5bac79049d2ab64564dd103d3a256253ad54595a631ef3b" Nov 25 10:27:28 crc kubenswrapper[4769]: I1125 10:27:28.718317 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" event={"ID":"d58c71b5-5dc4-45c1-9b58-9740a35d2256","Type":"ContainerStarted","Data":"76c39a79fa79eb13861766be39df967f4ca3f6d724001859f3da0abc94b4b541"} Nov 25 10:29:28 crc kubenswrapper[4769]: I1125 10:29:28.628832 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-r8pgq"] Nov 25 10:29:28 crc kubenswrapper[4769]: I1125 10:29:28.647032 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-r8pgq"] Nov 25 10:29:28 crc kubenswrapper[4769]: I1125 10:29:28.647160 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r8pgq" Nov 25 10:29:28 crc kubenswrapper[4769]: I1125 10:29:28.779345 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c6474158-bf1a-470c-8fd3-7a83f9db107b-utilities\") pod \"certified-operators-r8pgq\" (UID: \"c6474158-bf1a-470c-8fd3-7a83f9db107b\") " pod="openshift-marketplace/certified-operators-r8pgq" Nov 25 10:29:28 crc kubenswrapper[4769]: I1125 10:29:28.779699 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c6474158-bf1a-470c-8fd3-7a83f9db107b-catalog-content\") pod \"certified-operators-r8pgq\" (UID: \"c6474158-bf1a-470c-8fd3-7a83f9db107b\") " pod="openshift-marketplace/certified-operators-r8pgq" Nov 25 10:29:28 crc kubenswrapper[4769]: I1125 10:29:28.780004 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pc7ls\" (UniqueName: \"kubernetes.io/projected/c6474158-bf1a-470c-8fd3-7a83f9db107b-kube-api-access-pc7ls\") pod \"certified-operators-r8pgq\" (UID: \"c6474158-bf1a-470c-8fd3-7a83f9db107b\") " pod="openshift-marketplace/certified-operators-r8pgq" Nov 25 10:29:28 crc kubenswrapper[4769]: I1125 10:29:28.827711 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-ffl6j"] Nov 25 10:29:28 crc kubenswrapper[4769]: I1125 10:29:28.831357 4769 util.go:30] "No sandbox for pod can be found. 
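Notice that the 10:27:28 RemoveContainer is followed by a ContainerStarted rather than another back-off error: the 5m window since the last crash had finally elapsed, so one restart attempt was allowed through. A sketch of that gate; the actual crash instant is not in this excerpt, so the value below is hypothetical:

package main

import (
	"fmt"
	"time"
)

// canRestart gates StartContainer on the back-off window since the last crash.
func canRestart(lastCrash, now time.Time, backoff time.Duration) bool {
	return !now.Before(lastCrash.Add(backoff))
}

func main() {
	// Hypothetical crash instant; the real one is not in this excerpt.
	lastCrash := time.Date(2025, 11, 25, 10, 22, 28, 0, time.UTC)
	backoff := 5 * time.Minute

	for _, attempt := range []time.Time{
		time.Date(2025, 11, 25, 10, 27, 14, 0, time.UTC), // still rejected
		time.Date(2025, 11, 25, 10, 27, 28, 0, time.UTC), // window elapsed
	} {
		fmt.Println(attempt.Format("15:04:05"), "restart allowed:", canRestart(lastCrash, attempt, backoff))
	}
}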
Nov 25 10:29:28 crc kubenswrapper[4769]: I1125 10:29:28.831357 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ffl6j"
Nov 25 10:29:28 crc kubenswrapper[4769]: I1125 10:29:28.842756 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-ffl6j"]
Nov 25 10:29:28 crc kubenswrapper[4769]: I1125 10:29:28.883491 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c6474158-bf1a-470c-8fd3-7a83f9db107b-catalog-content\") pod \"certified-operators-r8pgq\" (UID: \"c6474158-bf1a-470c-8fd3-7a83f9db107b\") " pod="openshift-marketplace/certified-operators-r8pgq"
Nov 25 10:29:28 crc kubenswrapper[4769]: I1125 10:29:28.883649 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pc7ls\" (UniqueName: \"kubernetes.io/projected/c6474158-bf1a-470c-8fd3-7a83f9db107b-kube-api-access-pc7ls\") pod \"certified-operators-r8pgq\" (UID: \"c6474158-bf1a-470c-8fd3-7a83f9db107b\") " pod="openshift-marketplace/certified-operators-r8pgq"
Nov 25 10:29:28 crc kubenswrapper[4769]: I1125 10:29:28.883895 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c6474158-bf1a-470c-8fd3-7a83f9db107b-utilities\") pod \"certified-operators-r8pgq\" (UID: \"c6474158-bf1a-470c-8fd3-7a83f9db107b\") " pod="openshift-marketplace/certified-operators-r8pgq"
Nov 25 10:29:28 crc kubenswrapper[4769]: I1125 10:29:28.884159 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c6474158-bf1a-470c-8fd3-7a83f9db107b-catalog-content\") pod \"certified-operators-r8pgq\" (UID: \"c6474158-bf1a-470c-8fd3-7a83f9db107b\") " pod="openshift-marketplace/certified-operators-r8pgq"
Nov 25 10:29:28 crc kubenswrapper[4769]: I1125 10:29:28.884601 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c6474158-bf1a-470c-8fd3-7a83f9db107b-utilities\") pod \"certified-operators-r8pgq\" (UID: \"c6474158-bf1a-470c-8fd3-7a83f9db107b\") " pod="openshift-marketplace/certified-operators-r8pgq"
Nov 25 10:29:28 crc kubenswrapper[4769]: I1125 10:29:28.915067 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pc7ls\" (UniqueName: \"kubernetes.io/projected/c6474158-bf1a-470c-8fd3-7a83f9db107b-kube-api-access-pc7ls\") pod \"certified-operators-r8pgq\" (UID: \"c6474158-bf1a-470c-8fd3-7a83f9db107b\") " pod="openshift-marketplace/certified-operators-r8pgq"
Nov 25 10:29:28 crc kubenswrapper[4769]: I1125 10:29:28.972846 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r8pgq"
Nov 25 10:29:28 crc kubenswrapper[4769]: I1125 10:29:28.986314 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qrhtz\" (UniqueName: \"kubernetes.io/projected/b590bf5e-f400-4c9f-887d-808a2d67bf8c-kube-api-access-qrhtz\") pod \"community-operators-ffl6j\" (UID: \"b590bf5e-f400-4c9f-887d-808a2d67bf8c\") " pod="openshift-marketplace/community-operators-ffl6j"
Nov 25 10:29:28 crc kubenswrapper[4769]: I1125 10:29:28.986380 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b590bf5e-f400-4c9f-887d-808a2d67bf8c-utilities\") pod \"community-operators-ffl6j\" (UID: \"b590bf5e-f400-4c9f-887d-808a2d67bf8c\") " pod="openshift-marketplace/community-operators-ffl6j"
Nov 25 10:29:28 crc kubenswrapper[4769]: I1125 10:29:28.986700 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b590bf5e-f400-4c9f-887d-808a2d67bf8c-catalog-content\") pod \"community-operators-ffl6j\" (UID: \"b590bf5e-f400-4c9f-887d-808a2d67bf8c\") " pod="openshift-marketplace/community-operators-ffl6j"
Nov 25 10:29:29 crc kubenswrapper[4769]: I1125 10:29:29.091033 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b590bf5e-f400-4c9f-887d-808a2d67bf8c-catalog-content\") pod \"community-operators-ffl6j\" (UID: \"b590bf5e-f400-4c9f-887d-808a2d67bf8c\") " pod="openshift-marketplace/community-operators-ffl6j"
Nov 25 10:29:29 crc kubenswrapper[4769]: I1125 10:29:29.091648 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qrhtz\" (UniqueName: \"kubernetes.io/projected/b590bf5e-f400-4c9f-887d-808a2d67bf8c-kube-api-access-qrhtz\") pod \"community-operators-ffl6j\" (UID: \"b590bf5e-f400-4c9f-887d-808a2d67bf8c\") " pod="openshift-marketplace/community-operators-ffl6j"
Nov 25 10:29:29 crc kubenswrapper[4769]: I1125 10:29:29.091708 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b590bf5e-f400-4c9f-887d-808a2d67bf8c-utilities\") pod \"community-operators-ffl6j\" (UID: \"b590bf5e-f400-4c9f-887d-808a2d67bf8c\") " pod="openshift-marketplace/community-operators-ffl6j"
Nov 25 10:29:29 crc kubenswrapper[4769]: I1125 10:29:29.092039 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b590bf5e-f400-4c9f-887d-808a2d67bf8c-catalog-content\") pod \"community-operators-ffl6j\" (UID: \"b590bf5e-f400-4c9f-887d-808a2d67bf8c\") " pod="openshift-marketplace/community-operators-ffl6j"
Nov 25 10:29:29 crc kubenswrapper[4769]: I1125 10:29:29.092246 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b590bf5e-f400-4c9f-887d-808a2d67bf8c-utilities\") pod \"community-operators-ffl6j\" (UID: \"b590bf5e-f400-4c9f-887d-808a2d67bf8c\") " pod="openshift-marketplace/community-operators-ffl6j"
Nov 25 10:29:29 crc kubenswrapper[4769]: I1125 10:29:29.121603 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qrhtz\" (UniqueName: \"kubernetes.io/projected/b590bf5e-f400-4c9f-887d-808a2d67bf8c-kube-api-access-qrhtz\") pod \"community-operators-ffl6j\" (UID: \"b590bf5e-f400-4c9f-887d-808a2d67bf8c\") " pod="openshift-marketplace/community-operators-ffl6j"
Nov 25 10:29:29 crc kubenswrapper[4769]: I1125 10:29:29.152135 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ffl6j"
Nov 25 10:29:30 crc kubenswrapper[4769]: I1125 10:29:30.211925 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-ffl6j"]
Nov 25 10:29:30 crc kubenswrapper[4769]: I1125 10:29:30.231099 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-r8pgq"]
Nov 25 10:29:30 crc kubenswrapper[4769]: I1125 10:29:30.288908 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ffl6j" event={"ID":"b590bf5e-f400-4c9f-887d-808a2d67bf8c","Type":"ContainerStarted","Data":"6d5003367740727619242e2f421e43b501772bb5b2c96014ef52cf63e325103c"}
Nov 25 10:29:30 crc kubenswrapper[4769]: I1125 10:29:30.295009 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r8pgq" event={"ID":"c6474158-bf1a-470c-8fd3-7a83f9db107b","Type":"ContainerStarted","Data":"16412a01a9440fa2f9aed3180256ace0329773c00d3b283f5ff296ef1632b504"}
Nov 25 10:29:31 crc kubenswrapper[4769]: I1125 10:29:31.310520 4769 generic.go:334] "Generic (PLEG): container finished" podID="c6474158-bf1a-470c-8fd3-7a83f9db107b" containerID="4841742999a7e5c16db36d373b8ad092c3197a52f28c6752769cb7f39e8f638e" exitCode=0
Nov 25 10:29:31 crc kubenswrapper[4769]: I1125 10:29:31.310656 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r8pgq" event={"ID":"c6474158-bf1a-470c-8fd3-7a83f9db107b","Type":"ContainerDied","Data":"4841742999a7e5c16db36d373b8ad092c3197a52f28c6752769cb7f39e8f638e"}
Nov 25 10:29:31 crc kubenswrapper[4769]: I1125 10:29:31.315775 4769 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 25 10:29:31 crc kubenswrapper[4769]: I1125 10:29:31.315781 4769 generic.go:334] "Generic (PLEG): container finished" podID="b590bf5e-f400-4c9f-887d-808a2d67bf8c" containerID="e5e6b5d1d3b90852e6b9284476e87ac49857dc647bb4403d3c5104a288930a8f" exitCode=0
Nov 25 10:29:31 crc kubenswrapper[4769]: I1125 10:29:31.315846 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ffl6j" event={"ID":"b590bf5e-f400-4c9f-887d-808a2d67bf8c","Type":"ContainerDied","Data":"e5e6b5d1d3b90852e6b9284476e87ac49857dc647bb4403d3c5104a288930a8f"}
Nov 25 10:29:32 crc kubenswrapper[4769]: I1125 10:29:32.328070 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r8pgq" event={"ID":"c6474158-bf1a-470c-8fd3-7a83f9db107b","Type":"ContainerStarted","Data":"4483443ef65766fc45bc59041153778936a49d821321a891cea269c7fbc94245"}
Nov 25 10:29:33 crc kubenswrapper[4769]: I1125 10:29:33.341561 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ffl6j" event={"ID":"b590bf5e-f400-4c9f-887d-808a2d67bf8c","Type":"ContainerStarted","Data":"37ba24467088e1fdacffaa209c8b7b39281d804e97e1353c6763ce6180e9cbd3"}
Nov 25 10:29:35 crc kubenswrapper[4769]: I1125 10:29:35.373651 4769 generic.go:334] "Generic (PLEG): container finished" podID="c6474158-bf1a-470c-8fd3-7a83f9db107b" containerID="4483443ef65766fc45bc59041153778936a49d821321a891cea269c7fbc94245" exitCode=0
10:29:35 crc kubenswrapper[4769]: I1125 10:29:35.374159 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r8pgq" event={"ID":"c6474158-bf1a-470c-8fd3-7a83f9db107b","Type":"ContainerDied","Data":"4483443ef65766fc45bc59041153778936a49d821321a891cea269c7fbc94245"} Nov 25 10:29:35 crc kubenswrapper[4769]: I1125 10:29:35.382408 4769 generic.go:334] "Generic (PLEG): container finished" podID="b590bf5e-f400-4c9f-887d-808a2d67bf8c" containerID="37ba24467088e1fdacffaa209c8b7b39281d804e97e1353c6763ce6180e9cbd3" exitCode=0 Nov 25 10:29:35 crc kubenswrapper[4769]: I1125 10:29:35.382451 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ffl6j" event={"ID":"b590bf5e-f400-4c9f-887d-808a2d67bf8c","Type":"ContainerDied","Data":"37ba24467088e1fdacffaa209c8b7b39281d804e97e1353c6763ce6180e9cbd3"} Nov 25 10:29:36 crc kubenswrapper[4769]: I1125 10:29:36.398319 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ffl6j" event={"ID":"b590bf5e-f400-4c9f-887d-808a2d67bf8c","Type":"ContainerStarted","Data":"432b0e6dc903114d2f859e43a3d38e3ed7c64c71ee88777781542b183320a07e"} Nov 25 10:29:36 crc kubenswrapper[4769]: I1125 10:29:36.402512 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r8pgq" event={"ID":"c6474158-bf1a-470c-8fd3-7a83f9db107b","Type":"ContainerStarted","Data":"bcc2d2d11c4455cadb60f1767bbb3cc2ef2978a81ca267e3302d69bf5375efa4"} Nov 25 10:29:36 crc kubenswrapper[4769]: I1125 10:29:36.436249 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-ffl6j" podStartSLOduration=3.933650712 podStartE2EDuration="8.436225125s" podCreationTimestamp="2025-11-25 10:29:28 +0000 UTC" firstStartedPulling="2025-11-25 10:29:31.319508949 +0000 UTC m=+2719.904481262" lastFinishedPulling="2025-11-25 10:29:35.822083362 +0000 UTC m=+2724.407055675" observedRunningTime="2025-11-25 10:29:36.424449669 +0000 UTC m=+2725.009422022" watchObservedRunningTime="2025-11-25 10:29:36.436225125 +0000 UTC m=+2725.021197438" Nov 25 10:29:36 crc kubenswrapper[4769]: I1125 10:29:36.447385 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-r8pgq" podStartSLOduration=3.980426398 podStartE2EDuration="8.447362454s" podCreationTimestamp="2025-11-25 10:29:28 +0000 UTC" firstStartedPulling="2025-11-25 10:29:31.315383949 +0000 UTC m=+2719.900356272" lastFinishedPulling="2025-11-25 10:29:35.782320015 +0000 UTC m=+2724.367292328" observedRunningTime="2025-11-25 10:29:36.443585253 +0000 UTC m=+2725.028557596" watchObservedRunningTime="2025-11-25 10:29:36.447362454 +0000 UTC m=+2725.032334757" Nov 25 10:29:38 crc kubenswrapper[4769]: I1125 10:29:38.973066 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-r8pgq" Nov 25 10:29:38 crc kubenswrapper[4769]: I1125 10:29:38.973810 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-r8pgq" Nov 25 10:29:39 crc kubenswrapper[4769]: I1125 10:29:39.153121 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-ffl6j" Nov 25 10:29:39 crc kubenswrapper[4769]: I1125 10:29:39.153201 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/community-operators-ffl6j" Nov 25 10:29:39 crc kubenswrapper[4769]: I1125 10:29:39.242755 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-ffl6j" Nov 25 10:29:40 crc kubenswrapper[4769]: I1125 10:29:40.035586 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-r8pgq" podUID="c6474158-bf1a-470c-8fd3-7a83f9db107b" containerName="registry-server" probeResult="failure" output=< Nov 25 10:29:40 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 10:29:40 crc kubenswrapper[4769]: > Nov 25 10:29:49 crc kubenswrapper[4769]: I1125 10:29:49.061369 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-r8pgq" Nov 25 10:29:49 crc kubenswrapper[4769]: I1125 10:29:49.125608 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-r8pgq" Nov 25 10:29:49 crc kubenswrapper[4769]: I1125 10:29:49.232396 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-ffl6j" Nov 25 10:29:49 crc kubenswrapper[4769]: I1125 10:29:49.307669 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-r8pgq"] Nov 25 10:29:51 crc kubenswrapper[4769]: I1125 10:29:51.523172 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-ffl6j"] Nov 25 10:29:51 crc kubenswrapper[4769]: I1125 10:29:51.524208 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-ffl6j" podUID="b590bf5e-f400-4c9f-887d-808a2d67bf8c" containerName="registry-server" containerID="cri-o://432b0e6dc903114d2f859e43a3d38e3ed7c64c71ee88777781542b183320a07e" gracePeriod=2 Nov 25 10:29:51 crc kubenswrapper[4769]: I1125 10:29:51.935493 4769 generic.go:334] "Generic (PLEG): container finished" podID="b590bf5e-f400-4c9f-887d-808a2d67bf8c" containerID="432b0e6dc903114d2f859e43a3d38e3ed7c64c71ee88777781542b183320a07e" exitCode=0 Nov 25 10:29:51 crc kubenswrapper[4769]: I1125 10:29:51.935576 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ffl6j" event={"ID":"b590bf5e-f400-4c9f-887d-808a2d67bf8c","Type":"ContainerDied","Data":"432b0e6dc903114d2f859e43a3d38e3ed7c64c71ee88777781542b183320a07e"} Nov 25 10:29:51 crc kubenswrapper[4769]: I1125 10:29:51.936056 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-r8pgq" podUID="c6474158-bf1a-470c-8fd3-7a83f9db107b" containerName="registry-server" containerID="cri-o://bcc2d2d11c4455cadb60f1767bbb3cc2ef2978a81ca267e3302d69bf5375efa4" gracePeriod=2 Nov 25 10:29:52 crc kubenswrapper[4769]: I1125 10:29:52.294568 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:29:52 crc kubenswrapper[4769]: I1125 10:29:52.297865 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" 
output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:29:52 crc kubenswrapper[4769]: I1125 10:29:52.524601 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r8pgq" Nov 25 10:29:52 crc kubenswrapper[4769]: I1125 10:29:52.614539 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ffl6j" Nov 25 10:29:52 crc kubenswrapper[4769]: I1125 10:29:52.618831 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c6474158-bf1a-470c-8fd3-7a83f9db107b-utilities\") pod \"c6474158-bf1a-470c-8fd3-7a83f9db107b\" (UID: \"c6474158-bf1a-470c-8fd3-7a83f9db107b\") " Nov 25 10:29:52 crc kubenswrapper[4769]: I1125 10:29:52.619055 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pc7ls\" (UniqueName: \"kubernetes.io/projected/c6474158-bf1a-470c-8fd3-7a83f9db107b-kube-api-access-pc7ls\") pod \"c6474158-bf1a-470c-8fd3-7a83f9db107b\" (UID: \"c6474158-bf1a-470c-8fd3-7a83f9db107b\") " Nov 25 10:29:52 crc kubenswrapper[4769]: I1125 10:29:52.619179 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c6474158-bf1a-470c-8fd3-7a83f9db107b-catalog-content\") pod \"c6474158-bf1a-470c-8fd3-7a83f9db107b\" (UID: \"c6474158-bf1a-470c-8fd3-7a83f9db107b\") " Nov 25 10:29:52 crc kubenswrapper[4769]: I1125 10:29:52.619925 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c6474158-bf1a-470c-8fd3-7a83f9db107b-utilities" (OuterVolumeSpecName: "utilities") pod "c6474158-bf1a-470c-8fd3-7a83f9db107b" (UID: "c6474158-bf1a-470c-8fd3-7a83f9db107b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:29:52 crc kubenswrapper[4769]: I1125 10:29:52.620123 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c6474158-bf1a-470c-8fd3-7a83f9db107b-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:29:52 crc kubenswrapper[4769]: I1125 10:29:52.625366 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c6474158-bf1a-470c-8fd3-7a83f9db107b-kube-api-access-pc7ls" (OuterVolumeSpecName: "kube-api-access-pc7ls") pod "c6474158-bf1a-470c-8fd3-7a83f9db107b" (UID: "c6474158-bf1a-470c-8fd3-7a83f9db107b"). InnerVolumeSpecName "kube-api-access-pc7ls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:29:52 crc kubenswrapper[4769]: I1125 10:29:52.666272 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c6474158-bf1a-470c-8fd3-7a83f9db107b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c6474158-bf1a-470c-8fd3-7a83f9db107b" (UID: "c6474158-bf1a-470c-8fd3-7a83f9db107b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:29:52 crc kubenswrapper[4769]: I1125 10:29:52.721797 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b590bf5e-f400-4c9f-887d-808a2d67bf8c-utilities\") pod \"b590bf5e-f400-4c9f-887d-808a2d67bf8c\" (UID: \"b590bf5e-f400-4c9f-887d-808a2d67bf8c\") " Nov 25 10:29:52 crc kubenswrapper[4769]: I1125 10:29:52.721887 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qrhtz\" (UniqueName: \"kubernetes.io/projected/b590bf5e-f400-4c9f-887d-808a2d67bf8c-kube-api-access-qrhtz\") pod \"b590bf5e-f400-4c9f-887d-808a2d67bf8c\" (UID: \"b590bf5e-f400-4c9f-887d-808a2d67bf8c\") " Nov 25 10:29:52 crc kubenswrapper[4769]: I1125 10:29:52.722054 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b590bf5e-f400-4c9f-887d-808a2d67bf8c-catalog-content\") pod \"b590bf5e-f400-4c9f-887d-808a2d67bf8c\" (UID: \"b590bf5e-f400-4c9f-887d-808a2d67bf8c\") " Nov 25 10:29:52 crc kubenswrapper[4769]: I1125 10:29:52.722890 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c6474158-bf1a-470c-8fd3-7a83f9db107b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:29:52 crc kubenswrapper[4769]: I1125 10:29:52.722918 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pc7ls\" (UniqueName: \"kubernetes.io/projected/c6474158-bf1a-470c-8fd3-7a83f9db107b-kube-api-access-pc7ls\") on node \"crc\" DevicePath \"\"" Nov 25 10:29:52 crc kubenswrapper[4769]: I1125 10:29:52.722884 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b590bf5e-f400-4c9f-887d-808a2d67bf8c-utilities" (OuterVolumeSpecName: "utilities") pod "b590bf5e-f400-4c9f-887d-808a2d67bf8c" (UID: "b590bf5e-f400-4c9f-887d-808a2d67bf8c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:29:52 crc kubenswrapper[4769]: I1125 10:29:52.725797 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b590bf5e-f400-4c9f-887d-808a2d67bf8c-kube-api-access-qrhtz" (OuterVolumeSpecName: "kube-api-access-qrhtz") pod "b590bf5e-f400-4c9f-887d-808a2d67bf8c" (UID: "b590bf5e-f400-4c9f-887d-808a2d67bf8c"). InnerVolumeSpecName "kube-api-access-qrhtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:29:52 crc kubenswrapper[4769]: I1125 10:29:52.778304 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b590bf5e-f400-4c9f-887d-808a2d67bf8c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b590bf5e-f400-4c9f-887d-808a2d67bf8c" (UID: "b590bf5e-f400-4c9f-887d-808a2d67bf8c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:29:52 crc kubenswrapper[4769]: I1125 10:29:52.825423 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b590bf5e-f400-4c9f-887d-808a2d67bf8c-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:29:52 crc kubenswrapper[4769]: I1125 10:29:52.825487 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qrhtz\" (UniqueName: \"kubernetes.io/projected/b590bf5e-f400-4c9f-887d-808a2d67bf8c-kube-api-access-qrhtz\") on node \"crc\" DevicePath \"\"" Nov 25 10:29:52 crc kubenswrapper[4769]: I1125 10:29:52.825504 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b590bf5e-f400-4c9f-887d-808a2d67bf8c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:29:52 crc kubenswrapper[4769]: I1125 10:29:52.949809 4769 generic.go:334] "Generic (PLEG): container finished" podID="c6474158-bf1a-470c-8fd3-7a83f9db107b" containerID="bcc2d2d11c4455cadb60f1767bbb3cc2ef2978a81ca267e3302d69bf5375efa4" exitCode=0 Nov 25 10:29:52 crc kubenswrapper[4769]: I1125 10:29:52.949888 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r8pgq" Nov 25 10:29:52 crc kubenswrapper[4769]: I1125 10:29:52.949922 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r8pgq" event={"ID":"c6474158-bf1a-470c-8fd3-7a83f9db107b","Type":"ContainerDied","Data":"bcc2d2d11c4455cadb60f1767bbb3cc2ef2978a81ca267e3302d69bf5375efa4"} Nov 25 10:29:52 crc kubenswrapper[4769]: I1125 10:29:52.950498 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r8pgq" event={"ID":"c6474158-bf1a-470c-8fd3-7a83f9db107b","Type":"ContainerDied","Data":"16412a01a9440fa2f9aed3180256ace0329773c00d3b283f5ff296ef1632b504"} Nov 25 10:29:52 crc kubenswrapper[4769]: I1125 10:29:52.950534 4769 scope.go:117] "RemoveContainer" containerID="bcc2d2d11c4455cadb60f1767bbb3cc2ef2978a81ca267e3302d69bf5375efa4" Nov 25 10:29:52 crc kubenswrapper[4769]: I1125 10:29:52.954072 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ffl6j" event={"ID":"b590bf5e-f400-4c9f-887d-808a2d67bf8c","Type":"ContainerDied","Data":"6d5003367740727619242e2f421e43b501772bb5b2c96014ef52cf63e325103c"} Nov 25 10:29:52 crc kubenswrapper[4769]: I1125 10:29:52.954207 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-ffl6j" Nov 25 10:29:52 crc kubenswrapper[4769]: I1125 10:29:52.989126 4769 scope.go:117] "RemoveContainer" containerID="4483443ef65766fc45bc59041153778936a49d821321a891cea269c7fbc94245" Nov 25 10:29:53 crc kubenswrapper[4769]: I1125 10:29:53.000345 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-r8pgq"] Nov 25 10:29:53 crc kubenswrapper[4769]: I1125 10:29:53.024083 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-r8pgq"] Nov 25 10:29:53 crc kubenswrapper[4769]: I1125 10:29:53.027085 4769 scope.go:117] "RemoveContainer" containerID="4841742999a7e5c16db36d373b8ad092c3197a52f28c6752769cb7f39e8f638e" Nov 25 10:29:53 crc kubenswrapper[4769]: I1125 10:29:53.041715 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-ffl6j"] Nov 25 10:29:53 crc kubenswrapper[4769]: I1125 10:29:53.057598 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-ffl6j"] Nov 25 10:29:53 crc kubenswrapper[4769]: I1125 10:29:53.085589 4769 scope.go:117] "RemoveContainer" containerID="bcc2d2d11c4455cadb60f1767bbb3cc2ef2978a81ca267e3302d69bf5375efa4" Nov 25 10:29:53 crc kubenswrapper[4769]: E1125 10:29:53.085951 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bcc2d2d11c4455cadb60f1767bbb3cc2ef2978a81ca267e3302d69bf5375efa4\": container with ID starting with bcc2d2d11c4455cadb60f1767bbb3cc2ef2978a81ca267e3302d69bf5375efa4 not found: ID does not exist" containerID="bcc2d2d11c4455cadb60f1767bbb3cc2ef2978a81ca267e3302d69bf5375efa4" Nov 25 10:29:53 crc kubenswrapper[4769]: I1125 10:29:53.086009 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bcc2d2d11c4455cadb60f1767bbb3cc2ef2978a81ca267e3302d69bf5375efa4"} err="failed to get container status \"bcc2d2d11c4455cadb60f1767bbb3cc2ef2978a81ca267e3302d69bf5375efa4\": rpc error: code = NotFound desc = could not find container \"bcc2d2d11c4455cadb60f1767bbb3cc2ef2978a81ca267e3302d69bf5375efa4\": container with ID starting with bcc2d2d11c4455cadb60f1767bbb3cc2ef2978a81ca267e3302d69bf5375efa4 not found: ID does not exist" Nov 25 10:29:53 crc kubenswrapper[4769]: I1125 10:29:53.086036 4769 scope.go:117] "RemoveContainer" containerID="4483443ef65766fc45bc59041153778936a49d821321a891cea269c7fbc94245" Nov 25 10:29:53 crc kubenswrapper[4769]: E1125 10:29:53.086346 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4483443ef65766fc45bc59041153778936a49d821321a891cea269c7fbc94245\": container with ID starting with 4483443ef65766fc45bc59041153778936a49d821321a891cea269c7fbc94245 not found: ID does not exist" containerID="4483443ef65766fc45bc59041153778936a49d821321a891cea269c7fbc94245" Nov 25 10:29:53 crc kubenswrapper[4769]: I1125 10:29:53.086376 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4483443ef65766fc45bc59041153778936a49d821321a891cea269c7fbc94245"} err="failed to get container status \"4483443ef65766fc45bc59041153778936a49d821321a891cea269c7fbc94245\": rpc error: code = NotFound desc = could not find container \"4483443ef65766fc45bc59041153778936a49d821321a891cea269c7fbc94245\": container with ID starting with 
4483443ef65766fc45bc59041153778936a49d821321a891cea269c7fbc94245 not found: ID does not exist" Nov 25 10:29:53 crc kubenswrapper[4769]: I1125 10:29:53.086393 4769 scope.go:117] "RemoveContainer" containerID="4841742999a7e5c16db36d373b8ad092c3197a52f28c6752769cb7f39e8f638e" Nov 25 10:29:53 crc kubenswrapper[4769]: E1125 10:29:53.086704 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4841742999a7e5c16db36d373b8ad092c3197a52f28c6752769cb7f39e8f638e\": container with ID starting with 4841742999a7e5c16db36d373b8ad092c3197a52f28c6752769cb7f39e8f638e not found: ID does not exist" containerID="4841742999a7e5c16db36d373b8ad092c3197a52f28c6752769cb7f39e8f638e" Nov 25 10:29:53 crc kubenswrapper[4769]: I1125 10:29:53.086730 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4841742999a7e5c16db36d373b8ad092c3197a52f28c6752769cb7f39e8f638e"} err="failed to get container status \"4841742999a7e5c16db36d373b8ad092c3197a52f28c6752769cb7f39e8f638e\": rpc error: code = NotFound desc = could not find container \"4841742999a7e5c16db36d373b8ad092c3197a52f28c6752769cb7f39e8f638e\": container with ID starting with 4841742999a7e5c16db36d373b8ad092c3197a52f28c6752769cb7f39e8f638e not found: ID does not exist" Nov 25 10:29:53 crc kubenswrapper[4769]: I1125 10:29:53.086748 4769 scope.go:117] "RemoveContainer" containerID="432b0e6dc903114d2f859e43a3d38e3ed7c64c71ee88777781542b183320a07e" Nov 25 10:29:53 crc kubenswrapper[4769]: I1125 10:29:53.147942 4769 scope.go:117] "RemoveContainer" containerID="37ba24467088e1fdacffaa209c8b7b39281d804e97e1353c6763ce6180e9cbd3" Nov 25 10:29:53 crc kubenswrapper[4769]: I1125 10:29:53.180391 4769 scope.go:117] "RemoveContainer" containerID="e5e6b5d1d3b90852e6b9284476e87ac49857dc647bb4403d3c5104a288930a8f" Nov 25 10:29:54 crc kubenswrapper[4769]: I1125 10:29:54.251633 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b590bf5e-f400-4c9f-887d-808a2d67bf8c" path="/var/lib/kubelet/pods/b590bf5e-f400-4c9f-887d-808a2d67bf8c/volumes" Nov 25 10:29:54 crc kubenswrapper[4769]: I1125 10:29:54.254238 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c6474158-bf1a-470c-8fd3-7a83f9db107b" path="/var/lib/kubelet/pods/c6474158-bf1a-470c-8fd3-7a83f9db107b/volumes" Nov 25 10:30:00 crc kubenswrapper[4769]: I1125 10:30:00.169334 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401110-knhbq"] Nov 25 10:30:00 crc kubenswrapper[4769]: E1125 10:30:00.170658 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b590bf5e-f400-4c9f-887d-808a2d67bf8c" containerName="extract-content" Nov 25 10:30:00 crc kubenswrapper[4769]: I1125 10:30:00.170676 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="b590bf5e-f400-4c9f-887d-808a2d67bf8c" containerName="extract-content" Nov 25 10:30:00 crc kubenswrapper[4769]: E1125 10:30:00.170705 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6474158-bf1a-470c-8fd3-7a83f9db107b" containerName="extract-utilities" Nov 25 10:30:00 crc kubenswrapper[4769]: I1125 10:30:00.170712 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6474158-bf1a-470c-8fd3-7a83f9db107b" containerName="extract-utilities" Nov 25 10:30:00 crc kubenswrapper[4769]: E1125 10:30:00.170729 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b590bf5e-f400-4c9f-887d-808a2d67bf8c" 
containerName="extract-utilities" Nov 25 10:30:00 crc kubenswrapper[4769]: I1125 10:30:00.170736 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="b590bf5e-f400-4c9f-887d-808a2d67bf8c" containerName="extract-utilities" Nov 25 10:30:00 crc kubenswrapper[4769]: E1125 10:30:00.170760 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6474158-bf1a-470c-8fd3-7a83f9db107b" containerName="extract-content" Nov 25 10:30:00 crc kubenswrapper[4769]: I1125 10:30:00.170768 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6474158-bf1a-470c-8fd3-7a83f9db107b" containerName="extract-content" Nov 25 10:30:00 crc kubenswrapper[4769]: E1125 10:30:00.170799 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b590bf5e-f400-4c9f-887d-808a2d67bf8c" containerName="registry-server" Nov 25 10:30:00 crc kubenswrapper[4769]: I1125 10:30:00.170806 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="b590bf5e-f400-4c9f-887d-808a2d67bf8c" containerName="registry-server" Nov 25 10:30:00 crc kubenswrapper[4769]: E1125 10:30:00.170832 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6474158-bf1a-470c-8fd3-7a83f9db107b" containerName="registry-server" Nov 25 10:30:00 crc kubenswrapper[4769]: I1125 10:30:00.170840 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6474158-bf1a-470c-8fd3-7a83f9db107b" containerName="registry-server" Nov 25 10:30:00 crc kubenswrapper[4769]: I1125 10:30:00.171114 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="b590bf5e-f400-4c9f-887d-808a2d67bf8c" containerName="registry-server" Nov 25 10:30:00 crc kubenswrapper[4769]: I1125 10:30:00.171155 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6474158-bf1a-470c-8fd3-7a83f9db107b" containerName="registry-server" Nov 25 10:30:00 crc kubenswrapper[4769]: I1125 10:30:00.172221 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-knhbq" Nov 25 10:30:00 crc kubenswrapper[4769]: I1125 10:30:00.174871 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 10:30:00 crc kubenswrapper[4769]: I1125 10:30:00.180004 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 10:30:00 crc kubenswrapper[4769]: I1125 10:30:00.204241 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401110-knhbq"] Nov 25 10:30:00 crc kubenswrapper[4769]: I1125 10:30:00.248571 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/868a47b4-3718-435c-a357-bfef84fd4d51-config-volume\") pod \"collect-profiles-29401110-knhbq\" (UID: \"868a47b4-3718-435c-a357-bfef84fd4d51\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-knhbq" Nov 25 10:30:00 crc kubenswrapper[4769]: I1125 10:30:00.248602 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/868a47b4-3718-435c-a357-bfef84fd4d51-secret-volume\") pod \"collect-profiles-29401110-knhbq\" (UID: \"868a47b4-3718-435c-a357-bfef84fd4d51\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-knhbq" Nov 25 10:30:00 crc kubenswrapper[4769]: I1125 10:30:00.248743 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hqsz2\" (UniqueName: \"kubernetes.io/projected/868a47b4-3718-435c-a357-bfef84fd4d51-kube-api-access-hqsz2\") pod \"collect-profiles-29401110-knhbq\" (UID: \"868a47b4-3718-435c-a357-bfef84fd4d51\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-knhbq" Nov 25 10:30:00 crc kubenswrapper[4769]: I1125 10:30:00.350950 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/868a47b4-3718-435c-a357-bfef84fd4d51-config-volume\") pod \"collect-profiles-29401110-knhbq\" (UID: \"868a47b4-3718-435c-a357-bfef84fd4d51\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-knhbq" Nov 25 10:30:00 crc kubenswrapper[4769]: I1125 10:30:00.351029 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/868a47b4-3718-435c-a357-bfef84fd4d51-secret-volume\") pod \"collect-profiles-29401110-knhbq\" (UID: \"868a47b4-3718-435c-a357-bfef84fd4d51\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-knhbq" Nov 25 10:30:00 crc kubenswrapper[4769]: I1125 10:30:00.351167 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hqsz2\" (UniqueName: \"kubernetes.io/projected/868a47b4-3718-435c-a357-bfef84fd4d51-kube-api-access-hqsz2\") pod \"collect-profiles-29401110-knhbq\" (UID: \"868a47b4-3718-435c-a357-bfef84fd4d51\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-knhbq" Nov 25 10:30:00 crc kubenswrapper[4769]: I1125 10:30:00.351784 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/868a47b4-3718-435c-a357-bfef84fd4d51-config-volume\") pod 
\"collect-profiles-29401110-knhbq\" (UID: \"868a47b4-3718-435c-a357-bfef84fd4d51\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-knhbq" Nov 25 10:30:00 crc kubenswrapper[4769]: I1125 10:30:00.364167 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/868a47b4-3718-435c-a357-bfef84fd4d51-secret-volume\") pod \"collect-profiles-29401110-knhbq\" (UID: \"868a47b4-3718-435c-a357-bfef84fd4d51\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-knhbq" Nov 25 10:30:00 crc kubenswrapper[4769]: I1125 10:30:00.379334 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hqsz2\" (UniqueName: \"kubernetes.io/projected/868a47b4-3718-435c-a357-bfef84fd4d51-kube-api-access-hqsz2\") pod \"collect-profiles-29401110-knhbq\" (UID: \"868a47b4-3718-435c-a357-bfef84fd4d51\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-knhbq" Nov 25 10:30:00 crc kubenswrapper[4769]: I1125 10:30:00.506088 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-knhbq" Nov 25 10:30:01 crc kubenswrapper[4769]: I1125 10:30:01.019773 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401110-knhbq"] Nov 25 10:30:01 crc kubenswrapper[4769]: I1125 10:30:01.055213 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-knhbq" event={"ID":"868a47b4-3718-435c-a357-bfef84fd4d51","Type":"ContainerStarted","Data":"e9d04129e9726a330366eecaecabd4cd837ff82d5e2e45918abbec0c7a46039d"} Nov 25 10:30:02 crc kubenswrapper[4769]: I1125 10:30:02.069246 4769 generic.go:334] "Generic (PLEG): container finished" podID="868a47b4-3718-435c-a357-bfef84fd4d51" containerID="d0fc9c539b2bf9a3d924a390bfa07f112b874862d2ba3b2ce19a8ff8b60acc05" exitCode=0 Nov 25 10:30:02 crc kubenswrapper[4769]: I1125 10:30:02.069708 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-knhbq" event={"ID":"868a47b4-3718-435c-a357-bfef84fd4d51","Type":"ContainerDied","Data":"d0fc9c539b2bf9a3d924a390bfa07f112b874862d2ba3b2ce19a8ff8b60acc05"} Nov 25 10:30:03 crc kubenswrapper[4769]: I1125 10:30:03.510688 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-knhbq" Nov 25 10:30:03 crc kubenswrapper[4769]: I1125 10:30:03.652208 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hqsz2\" (UniqueName: \"kubernetes.io/projected/868a47b4-3718-435c-a357-bfef84fd4d51-kube-api-access-hqsz2\") pod \"868a47b4-3718-435c-a357-bfef84fd4d51\" (UID: \"868a47b4-3718-435c-a357-bfef84fd4d51\") " Nov 25 10:30:03 crc kubenswrapper[4769]: I1125 10:30:03.652373 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/868a47b4-3718-435c-a357-bfef84fd4d51-secret-volume\") pod \"868a47b4-3718-435c-a357-bfef84fd4d51\" (UID: \"868a47b4-3718-435c-a357-bfef84fd4d51\") " Nov 25 10:30:03 crc kubenswrapper[4769]: I1125 10:30:03.652561 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/868a47b4-3718-435c-a357-bfef84fd4d51-config-volume\") pod \"868a47b4-3718-435c-a357-bfef84fd4d51\" (UID: \"868a47b4-3718-435c-a357-bfef84fd4d51\") " Nov 25 10:30:03 crc kubenswrapper[4769]: I1125 10:30:03.654240 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/868a47b4-3718-435c-a357-bfef84fd4d51-config-volume" (OuterVolumeSpecName: "config-volume") pod "868a47b4-3718-435c-a357-bfef84fd4d51" (UID: "868a47b4-3718-435c-a357-bfef84fd4d51"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:30:03 crc kubenswrapper[4769]: I1125 10:30:03.659472 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/868a47b4-3718-435c-a357-bfef84fd4d51-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "868a47b4-3718-435c-a357-bfef84fd4d51" (UID: "868a47b4-3718-435c-a357-bfef84fd4d51"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:30:03 crc kubenswrapper[4769]: I1125 10:30:03.660185 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/868a47b4-3718-435c-a357-bfef84fd4d51-kube-api-access-hqsz2" (OuterVolumeSpecName: "kube-api-access-hqsz2") pod "868a47b4-3718-435c-a357-bfef84fd4d51" (UID: "868a47b4-3718-435c-a357-bfef84fd4d51"). InnerVolumeSpecName "kube-api-access-hqsz2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:30:03 crc kubenswrapper[4769]: I1125 10:30:03.755847 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hqsz2\" (UniqueName: \"kubernetes.io/projected/868a47b4-3718-435c-a357-bfef84fd4d51-kube-api-access-hqsz2\") on node \"crc\" DevicePath \"\"" Nov 25 10:30:03 crc kubenswrapper[4769]: I1125 10:30:03.755901 4769 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/868a47b4-3718-435c-a357-bfef84fd4d51-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 10:30:03 crc kubenswrapper[4769]: I1125 10:30:03.755920 4769 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/868a47b4-3718-435c-a357-bfef84fd4d51-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 10:30:04 crc kubenswrapper[4769]: I1125 10:30:04.102383 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-knhbq" event={"ID":"868a47b4-3718-435c-a357-bfef84fd4d51","Type":"ContainerDied","Data":"e9d04129e9726a330366eecaecabd4cd837ff82d5e2e45918abbec0c7a46039d"} Nov 25 10:30:04 crc kubenswrapper[4769]: I1125 10:30:04.102432 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e9d04129e9726a330366eecaecabd4cd837ff82d5e2e45918abbec0c7a46039d" Nov 25 10:30:04 crc kubenswrapper[4769]: I1125 10:30:04.102448 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-knhbq" Nov 25 10:30:04 crc kubenswrapper[4769]: I1125 10:30:04.582149 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401065-bbzl9"] Nov 25 10:30:04 crc kubenswrapper[4769]: I1125 10:30:04.592257 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401065-bbzl9"] Nov 25 10:30:06 crc kubenswrapper[4769]: I1125 10:30:06.255404 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="21dad565-8414-4796-b777-e63f27b9c666" path="/var/lib/kubelet/pods/21dad565-8414-4796-b777-e63f27b9c666/volumes" Nov 25 10:30:10 crc kubenswrapper[4769]: I1125 10:30:10.209111 4769 generic.go:334] "Generic (PLEG): container finished" podID="1d2d8eea-8aae-4e92-8d69-630eec1d95b9" containerID="3ba0fc13a7ac0eac230c7a7ab4d22b70b1d4fb7d0766809787a26faf0d25dce1" exitCode=0 Nov 25 10:30:10 crc kubenswrapper[4769]: I1125 10:30:10.209238 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k57k" event={"ID":"1d2d8eea-8aae-4e92-8d69-630eec1d95b9","Type":"ContainerDied","Data":"3ba0fc13a7ac0eac230c7a7ab4d22b70b1d4fb7d0766809787a26faf0d25dce1"} Nov 25 10:30:11 crc kubenswrapper[4769]: I1125 10:30:11.694518 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k57k" Nov 25 10:30:11 crc kubenswrapper[4769]: I1125 10:30:11.787837 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1d2d8eea-8aae-4e92-8d69-630eec1d95b9-ssh-key\") pod \"1d2d8eea-8aae-4e92-8d69-630eec1d95b9\" (UID: \"1d2d8eea-8aae-4e92-8d69-630eec1d95b9\") " Nov 25 10:30:11 crc kubenswrapper[4769]: I1125 10:30:11.787942 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1d2d8eea-8aae-4e92-8d69-630eec1d95b9-inventory\") pod \"1d2d8eea-8aae-4e92-8d69-630eec1d95b9\" (UID: \"1d2d8eea-8aae-4e92-8d69-630eec1d95b9\") " Nov 25 10:30:11 crc kubenswrapper[4769]: I1125 10:30:11.788017 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/1d2d8eea-8aae-4e92-8d69-630eec1d95b9-libvirt-secret-0\") pod \"1d2d8eea-8aae-4e92-8d69-630eec1d95b9\" (UID: \"1d2d8eea-8aae-4e92-8d69-630eec1d95b9\") " Nov 25 10:30:11 crc kubenswrapper[4769]: I1125 10:30:11.788104 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cswb7\" (UniqueName: \"kubernetes.io/projected/1d2d8eea-8aae-4e92-8d69-630eec1d95b9-kube-api-access-cswb7\") pod \"1d2d8eea-8aae-4e92-8d69-630eec1d95b9\" (UID: \"1d2d8eea-8aae-4e92-8d69-630eec1d95b9\") " Nov 25 10:30:11 crc kubenswrapper[4769]: I1125 10:30:11.788269 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d2d8eea-8aae-4e92-8d69-630eec1d95b9-libvirt-combined-ca-bundle\") pod \"1d2d8eea-8aae-4e92-8d69-630eec1d95b9\" (UID: \"1d2d8eea-8aae-4e92-8d69-630eec1d95b9\") " Nov 25 10:30:11 crc kubenswrapper[4769]: I1125 10:30:11.795188 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d2d8eea-8aae-4e92-8d69-630eec1d95b9-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "1d2d8eea-8aae-4e92-8d69-630eec1d95b9" (UID: "1d2d8eea-8aae-4e92-8d69-630eec1d95b9"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:30:11 crc kubenswrapper[4769]: I1125 10:30:11.796181 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d2d8eea-8aae-4e92-8d69-630eec1d95b9-kube-api-access-cswb7" (OuterVolumeSpecName: "kube-api-access-cswb7") pod "1d2d8eea-8aae-4e92-8d69-630eec1d95b9" (UID: "1d2d8eea-8aae-4e92-8d69-630eec1d95b9"). InnerVolumeSpecName "kube-api-access-cswb7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:30:11 crc kubenswrapper[4769]: I1125 10:30:11.823715 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d2d8eea-8aae-4e92-8d69-630eec1d95b9-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "1d2d8eea-8aae-4e92-8d69-630eec1d95b9" (UID: "1d2d8eea-8aae-4e92-8d69-630eec1d95b9"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:30:11 crc kubenswrapper[4769]: I1125 10:30:11.834861 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d2d8eea-8aae-4e92-8d69-630eec1d95b9-inventory" (OuterVolumeSpecName: "inventory") pod "1d2d8eea-8aae-4e92-8d69-630eec1d95b9" (UID: "1d2d8eea-8aae-4e92-8d69-630eec1d95b9"). 
InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:30:11 crc kubenswrapper[4769]: I1125 10:30:11.854443 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d2d8eea-8aae-4e92-8d69-630eec1d95b9-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "1d2d8eea-8aae-4e92-8d69-630eec1d95b9" (UID: "1d2d8eea-8aae-4e92-8d69-630eec1d95b9"). InnerVolumeSpecName "libvirt-secret-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:30:11 crc kubenswrapper[4769]: I1125 10:30:11.891421 4769 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1d2d8eea-8aae-4e92-8d69-630eec1d95b9-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:30:11 crc kubenswrapper[4769]: I1125 10:30:11.891461 4769 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1d2d8eea-8aae-4e92-8d69-630eec1d95b9-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 10:30:11 crc kubenswrapper[4769]: I1125 10:30:11.891475 4769 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/1d2d8eea-8aae-4e92-8d69-630eec1d95b9-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Nov 25 10:30:11 crc kubenswrapper[4769]: I1125 10:30:11.891491 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cswb7\" (UniqueName: \"kubernetes.io/projected/1d2d8eea-8aae-4e92-8d69-630eec1d95b9-kube-api-access-cswb7\") on node \"crc\" DevicePath \"\"" Nov 25 10:30:11 crc kubenswrapper[4769]: I1125 10:30:11.891504 4769 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d2d8eea-8aae-4e92-8d69-630eec1d95b9-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:30:12 crc kubenswrapper[4769]: I1125 10:30:12.234264 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k57k" event={"ID":"1d2d8eea-8aae-4e92-8d69-630eec1d95b9","Type":"ContainerDied","Data":"a9b25f5fbf6bfc8c77e3eea51cadd58f262c46deb369ae5abdb4b81acb001cde"} Nov 25 10:30:12 crc kubenswrapper[4769]: I1125 10:30:12.234321 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7k57k" Nov 25 10:30:12 crc kubenswrapper[4769]: I1125 10:30:12.234346 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a9b25f5fbf6bfc8c77e3eea51cadd58f262c46deb369ae5abdb4b81acb001cde" Nov 25 10:30:12 crc kubenswrapper[4769]: I1125 10:30:12.326336 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zms"] Nov 25 10:30:12 crc kubenswrapper[4769]: E1125 10:30:12.326834 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="868a47b4-3718-435c-a357-bfef84fd4d51" containerName="collect-profiles" Nov 25 10:30:12 crc kubenswrapper[4769]: I1125 10:30:12.326853 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="868a47b4-3718-435c-a357-bfef84fd4d51" containerName="collect-profiles" Nov 25 10:30:12 crc kubenswrapper[4769]: E1125 10:30:12.326898 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d2d8eea-8aae-4e92-8d69-630eec1d95b9" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 25 10:30:12 crc kubenswrapper[4769]: I1125 10:30:12.326906 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d2d8eea-8aae-4e92-8d69-630eec1d95b9" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 25 10:30:12 crc kubenswrapper[4769]: I1125 10:30:12.327164 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="868a47b4-3718-435c-a357-bfef84fd4d51" containerName="collect-profiles" Nov 25 10:30:12 crc kubenswrapper[4769]: I1125 10:30:12.327184 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="1d2d8eea-8aae-4e92-8d69-630eec1d95b9" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 25 10:30:12 crc kubenswrapper[4769]: I1125 10:30:12.328029 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zms" Nov 25 10:30:12 crc kubenswrapper[4769]: I1125 10:30:12.330475 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Nov 25 10:30:12 crc kubenswrapper[4769]: I1125 10:30:12.330686 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-skqtv" Nov 25 10:30:12 crc kubenswrapper[4769]: I1125 10:30:12.330894 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config" Nov 25 10:30:12 crc kubenswrapper[4769]: I1125 10:30:12.331042 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 10:30:12 crc kubenswrapper[4769]: I1125 10:30:12.331155 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 10:30:12 crc kubenswrapper[4769]: I1125 10:30:12.331359 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Nov 25 10:30:12 crc kubenswrapper[4769]: I1125 10:30:12.331471 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:30:12 crc kubenswrapper[4769]: I1125 10:30:12.364090 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zms"] Nov 25 10:30:12 crc kubenswrapper[4769]: I1125 10:30:12.408312 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zms\" (UID: \"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zms" Nov 25 10:30:12 crc kubenswrapper[4769]: I1125 10:30:12.408402 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zms\" (UID: \"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zms" Nov 25 10:30:12 crc kubenswrapper[4769]: I1125 10:30:12.408450 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zms\" (UID: \"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zms" Nov 25 10:30:12 crc kubenswrapper[4769]: I1125 10:30:12.408642 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zms\" (UID: \"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zms" Nov 25 10:30:12 crc kubenswrapper[4769]: I1125 10:30:12.409118 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: 
\"kubernetes.io/secret/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zms\" (UID: \"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zms" Nov 25 10:30:12 crc kubenswrapper[4769]: I1125 10:30:12.409153 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6dfvx\" (UniqueName: \"kubernetes.io/projected/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-kube-api-access-6dfvx\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zms\" (UID: \"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zms" Nov 25 10:30:12 crc kubenswrapper[4769]: I1125 10:30:12.409225 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zms\" (UID: \"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zms" Nov 25 10:30:12 crc kubenswrapper[4769]: I1125 10:30:12.409299 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zms\" (UID: \"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zms" Nov 25 10:30:12 crc kubenswrapper[4769]: I1125 10:30:12.409357 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zms\" (UID: \"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zms" Nov 25 10:30:12 crc kubenswrapper[4769]: I1125 10:30:12.513660 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zms\" (UID: \"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zms" Nov 25 10:30:12 crc kubenswrapper[4769]: I1125 10:30:12.514282 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zms\" (UID: \"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zms" Nov 25 10:30:12 crc kubenswrapper[4769]: I1125 10:30:12.514331 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6dfvx\" (UniqueName: \"kubernetes.io/projected/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-kube-api-access-6dfvx\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zms\" (UID: \"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zms" Nov 25 10:30:12 crc kubenswrapper[4769]: I1125 10:30:12.514405 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: 
\"kubernetes.io/configmap/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zms\" (UID: \"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zms" Nov 25 10:30:12 crc kubenswrapper[4769]: I1125 10:30:12.514474 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zms\" (UID: \"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zms" Nov 25 10:30:12 crc kubenswrapper[4769]: I1125 10:30:12.514537 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zms\" (UID: \"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zms" Nov 25 10:30:12 crc kubenswrapper[4769]: I1125 10:30:12.514673 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zms\" (UID: \"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zms" Nov 25 10:30:12 crc kubenswrapper[4769]: I1125 10:30:12.514746 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zms\" (UID: \"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zms" Nov 25 10:30:12 crc kubenswrapper[4769]: I1125 10:30:12.514802 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zms\" (UID: \"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zms" Nov 25 10:30:12 crc kubenswrapper[4769]: I1125 10:30:12.515366 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zms\" (UID: \"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zms" Nov 25 10:30:12 crc kubenswrapper[4769]: I1125 10:30:12.518213 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zms\" (UID: \"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zms" Nov 25 10:30:12 crc kubenswrapper[4769]: I1125 10:30:12.518858 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zms\" 
(UID: \"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zms" Nov 25 10:30:12 crc kubenswrapper[4769]: I1125 10:30:12.521789 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zms\" (UID: \"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zms" Nov 25 10:30:12 crc kubenswrapper[4769]: I1125 10:30:12.521795 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zms\" (UID: \"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zms" Nov 25 10:30:12 crc kubenswrapper[4769]: I1125 10:30:12.522834 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zms\" (UID: \"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zms" Nov 25 10:30:12 crc kubenswrapper[4769]: I1125 10:30:12.524207 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zms\" (UID: \"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zms" Nov 25 10:30:12 crc kubenswrapper[4769]: I1125 10:30:12.525674 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zms\" (UID: \"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zms" Nov 25 10:30:12 crc kubenswrapper[4769]: I1125 10:30:12.532591 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6dfvx\" (UniqueName: \"kubernetes.io/projected/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-kube-api-access-6dfvx\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zms\" (UID: \"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zms" Nov 25 10:30:12 crc kubenswrapper[4769]: I1125 10:30:12.652089 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zms" Nov 25 10:30:13 crc kubenswrapper[4769]: I1125 10:30:13.273947 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zms"] Nov 25 10:30:13 crc kubenswrapper[4769]: W1125 10:30:13.281249 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod88ab5006_f5a9_4c7c_b4c9_7b74e12e0ad7.slice/crio-0dcc980c4e3b6be926d8b091aa0f7af32d43366d073f2ae2685f911a00019979 WatchSource:0}: Error finding container 0dcc980c4e3b6be926d8b091aa0f7af32d43366d073f2ae2685f911a00019979: Status 404 returned error can't find the container with id 0dcc980c4e3b6be926d8b091aa0f7af32d43366d073f2ae2685f911a00019979 Nov 25 10:30:14 crc kubenswrapper[4769]: I1125 10:30:14.287992 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zms" event={"ID":"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7","Type":"ContainerStarted","Data":"4e906da4663ad96c4de13873befcd92dde01f5d3bf1226b5d9ef9d1934b91779"} Nov 25 10:30:14 crc kubenswrapper[4769]: I1125 10:30:14.288611 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zms" event={"ID":"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7","Type":"ContainerStarted","Data":"0dcc980c4e3b6be926d8b091aa0f7af32d43366d073f2ae2685f911a00019979"} Nov 25 10:30:14 crc kubenswrapper[4769]: I1125 10:30:14.318367 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zms" podStartSLOduration=1.7424202709999999 podStartE2EDuration="2.314542597s" podCreationTimestamp="2025-11-25 10:30:12 +0000 UTC" firstStartedPulling="2025-11-25 10:30:13.28594351 +0000 UTC m=+2761.870915823" lastFinishedPulling="2025-11-25 10:30:13.858065836 +0000 UTC m=+2762.443038149" observedRunningTime="2025-11-25 10:30:14.309101241 +0000 UTC m=+2762.894073554" watchObservedRunningTime="2025-11-25 10:30:14.314542597 +0000 UTC m=+2762.899514910" Nov 25 10:30:22 crc kubenswrapper[4769]: I1125 10:30:22.291073 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:30:22 crc kubenswrapper[4769]: I1125 10:30:22.291863 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:30:47 crc kubenswrapper[4769]: I1125 10:30:47.839428 4769 scope.go:117] "RemoveContainer" containerID="039423952c8f0d9590aaeeab3b5c963aa92c26970c18c88a2a596c36db833521" Nov 25 10:30:52 crc kubenswrapper[4769]: I1125 10:30:52.290472 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:30:52 crc kubenswrapper[4769]: I1125 10:30:52.291165 4769 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:30:52 crc kubenswrapper[4769]: I1125 10:30:52.291239 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" Nov 25 10:30:52 crc kubenswrapper[4769]: I1125 10:30:52.292382 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"76c39a79fa79eb13861766be39df967f4ca3f6d724001859f3da0abc94b4b541"} pod="openshift-machine-config-operator/machine-config-daemon-98mzt" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 10:30:52 crc kubenswrapper[4769]: I1125 10:30:52.292456 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" containerID="cri-o://76c39a79fa79eb13861766be39df967f4ca3f6d724001859f3da0abc94b4b541" gracePeriod=600 Nov 25 10:30:52 crc kubenswrapper[4769]: I1125 10:30:52.758444 4769 generic.go:334] "Generic (PLEG): container finished" podID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerID="76c39a79fa79eb13861766be39df967f4ca3f6d724001859f3da0abc94b4b541" exitCode=0 Nov 25 10:30:52 crc kubenswrapper[4769]: I1125 10:30:52.758871 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" event={"ID":"d58c71b5-5dc4-45c1-9b58-9740a35d2256","Type":"ContainerDied","Data":"76c39a79fa79eb13861766be39df967f4ca3f6d724001859f3da0abc94b4b541"} Nov 25 10:30:52 crc kubenswrapper[4769]: I1125 10:30:52.758904 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" event={"ID":"d58c71b5-5dc4-45c1-9b58-9740a35d2256","Type":"ContainerStarted","Data":"ca58e4115a8ae8db9ff23948bf8aee779f500aa2408587bba8cdb927db89acf0"} Nov 25 10:30:52 crc kubenswrapper[4769]: I1125 10:30:52.758935 4769 scope.go:117] "RemoveContainer" containerID="c0a1ea65b15cd591f5bac79049d2ab64564dd103d3a256253ad54595a631ef3b" Nov 25 10:32:52 crc kubenswrapper[4769]: I1125 10:32:52.290137 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:32:52 crc kubenswrapper[4769]: I1125 10:32:52.290742 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:32:52 crc kubenswrapper[4769]: I1125 10:32:52.793783 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-426q9"] Nov 25 10:32:52 crc kubenswrapper[4769]: I1125 10:32:52.796954 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-426q9" Nov 25 10:32:52 crc kubenswrapper[4769]: I1125 10:32:52.809906 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-426q9"] Nov 25 10:32:52 crc kubenswrapper[4769]: I1125 10:32:52.856508 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r2jpw\" (UniqueName: \"kubernetes.io/projected/eb9a18c2-02cc-4ee0-8a59-5fb6fbbc424e-kube-api-access-r2jpw\") pod \"redhat-operators-426q9\" (UID: \"eb9a18c2-02cc-4ee0-8a59-5fb6fbbc424e\") " pod="openshift-marketplace/redhat-operators-426q9" Nov 25 10:32:52 crc kubenswrapper[4769]: I1125 10:32:52.856824 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb9a18c2-02cc-4ee0-8a59-5fb6fbbc424e-catalog-content\") pod \"redhat-operators-426q9\" (UID: \"eb9a18c2-02cc-4ee0-8a59-5fb6fbbc424e\") " pod="openshift-marketplace/redhat-operators-426q9" Nov 25 10:32:52 crc kubenswrapper[4769]: I1125 10:32:52.856994 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb9a18c2-02cc-4ee0-8a59-5fb6fbbc424e-utilities\") pod \"redhat-operators-426q9\" (UID: \"eb9a18c2-02cc-4ee0-8a59-5fb6fbbc424e\") " pod="openshift-marketplace/redhat-operators-426q9" Nov 25 10:32:52 crc kubenswrapper[4769]: I1125 10:32:52.959992 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb9a18c2-02cc-4ee0-8a59-5fb6fbbc424e-catalog-content\") pod \"redhat-operators-426q9\" (UID: \"eb9a18c2-02cc-4ee0-8a59-5fb6fbbc424e\") " pod="openshift-marketplace/redhat-operators-426q9" Nov 25 10:32:52 crc kubenswrapper[4769]: I1125 10:32:52.960192 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb9a18c2-02cc-4ee0-8a59-5fb6fbbc424e-utilities\") pod \"redhat-operators-426q9\" (UID: \"eb9a18c2-02cc-4ee0-8a59-5fb6fbbc424e\") " pod="openshift-marketplace/redhat-operators-426q9" Nov 25 10:32:52 crc kubenswrapper[4769]: I1125 10:32:52.960310 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r2jpw\" (UniqueName: \"kubernetes.io/projected/eb9a18c2-02cc-4ee0-8a59-5fb6fbbc424e-kube-api-access-r2jpw\") pod \"redhat-operators-426q9\" (UID: \"eb9a18c2-02cc-4ee0-8a59-5fb6fbbc424e\") " pod="openshift-marketplace/redhat-operators-426q9" Nov 25 10:32:52 crc kubenswrapper[4769]: I1125 10:32:52.960664 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb9a18c2-02cc-4ee0-8a59-5fb6fbbc424e-catalog-content\") pod \"redhat-operators-426q9\" (UID: \"eb9a18c2-02cc-4ee0-8a59-5fb6fbbc424e\") " pod="openshift-marketplace/redhat-operators-426q9" Nov 25 10:32:52 crc kubenswrapper[4769]: I1125 10:32:52.961034 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb9a18c2-02cc-4ee0-8a59-5fb6fbbc424e-utilities\") pod \"redhat-operators-426q9\" (UID: \"eb9a18c2-02cc-4ee0-8a59-5fb6fbbc424e\") " pod="openshift-marketplace/redhat-operators-426q9" Nov 25 10:32:52 crc kubenswrapper[4769]: I1125 10:32:52.992229 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-r2jpw\" (UniqueName: \"kubernetes.io/projected/eb9a18c2-02cc-4ee0-8a59-5fb6fbbc424e-kube-api-access-r2jpw\") pod \"redhat-operators-426q9\" (UID: \"eb9a18c2-02cc-4ee0-8a59-5fb6fbbc424e\") " pod="openshift-marketplace/redhat-operators-426q9" Nov 25 10:32:53 crc kubenswrapper[4769]: I1125 10:32:53.118444 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-426q9" Nov 25 10:32:53 crc kubenswrapper[4769]: I1125 10:32:53.648022 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-426q9"] Nov 25 10:32:54 crc kubenswrapper[4769]: I1125 10:32:54.248155 4769 generic.go:334] "Generic (PLEG): container finished" podID="eb9a18c2-02cc-4ee0-8a59-5fb6fbbc424e" containerID="4fe46229d2dae0523a9c1bfe288c9c986609c50cd9d10a41e27a89146c7ee4c9" exitCode=0 Nov 25 10:32:54 crc kubenswrapper[4769]: I1125 10:32:54.253236 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-426q9" event={"ID":"eb9a18c2-02cc-4ee0-8a59-5fb6fbbc424e","Type":"ContainerDied","Data":"4fe46229d2dae0523a9c1bfe288c9c986609c50cd9d10a41e27a89146c7ee4c9"} Nov 25 10:32:54 crc kubenswrapper[4769]: I1125 10:32:54.253276 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-426q9" event={"ID":"eb9a18c2-02cc-4ee0-8a59-5fb6fbbc424e","Type":"ContainerStarted","Data":"3b1e9ea30658ee7d0f82fd175b9db04302bf35e91e10d5d5c83b2773283d9a05"} Nov 25 10:33:01 crc kubenswrapper[4769]: I1125 10:33:01.331231 4769 generic.go:334] "Generic (PLEG): container finished" podID="88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7" containerID="4e906da4663ad96c4de13873befcd92dde01f5d3bf1226b5d9ef9d1934b91779" exitCode=0 Nov 25 10:33:01 crc kubenswrapper[4769]: I1125 10:33:01.331321 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zms" event={"ID":"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7","Type":"ContainerDied","Data":"4e906da4663ad96c4de13873befcd92dde01f5d3bf1226b5d9ef9d1934b91779"} Nov 25 10:33:02 crc kubenswrapper[4769]: I1125 10:33:02.358074 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-426q9" event={"ID":"eb9a18c2-02cc-4ee0-8a59-5fb6fbbc424e","Type":"ContainerStarted","Data":"13ff7561c326b4474f352d9429fac962377a13cd5c08cd00649fac558fcf83ab"} Nov 25 10:33:02 crc kubenswrapper[4769]: I1125 10:33:02.954536 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zms" Nov 25 10:33:02 crc kubenswrapper[4769]: I1125 10:33:02.970748 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-nova-migration-ssh-key-1\") pod \"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7\" (UID: \"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7\") " Nov 25 10:33:02 crc kubenswrapper[4769]: I1125 10:33:02.970800 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-nova-migration-ssh-key-0\") pod \"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7\" (UID: \"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7\") " Nov 25 10:33:02 crc kubenswrapper[4769]: I1125 10:33:02.970824 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-nova-cell1-compute-config-1\") pod \"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7\" (UID: \"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7\") " Nov 25 10:33:02 crc kubenswrapper[4769]: I1125 10:33:02.970845 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-nova-cell1-compute-config-0\") pod \"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7\" (UID: \"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7\") " Nov 25 10:33:02 crc kubenswrapper[4769]: I1125 10:33:02.970915 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6dfvx\" (UniqueName: \"kubernetes.io/projected/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-kube-api-access-6dfvx\") pod \"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7\" (UID: \"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7\") " Nov 25 10:33:02 crc kubenswrapper[4769]: I1125 10:33:02.971025 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-ssh-key\") pod \"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7\" (UID: \"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7\") " Nov 25 10:33:02 crc kubenswrapper[4769]: I1125 10:33:02.971162 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-nova-combined-ca-bundle\") pod \"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7\" (UID: \"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7\") " Nov 25 10:33:02 crc kubenswrapper[4769]: I1125 10:33:02.971181 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-inventory\") pod \"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7\" (UID: \"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7\") " Nov 25 10:33:02 crc kubenswrapper[4769]: I1125 10:33:02.971217 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-nova-extra-config-0\") pod \"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7\" (UID: \"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7\") " Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.005305 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/projected/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-kube-api-access-6dfvx" (OuterVolumeSpecName: "kube-api-access-6dfvx") pod "88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7" (UID: "88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7"). InnerVolumeSpecName "kube-api-access-6dfvx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.016312 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7" (UID: "88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.033145 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7" (UID: "88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.045008 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7" (UID: "88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.049051 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7" (UID: "88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.060564 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-inventory" (OuterVolumeSpecName: "inventory") pod "88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7" (UID: "88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.061216 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7" (UID: "88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.070885 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7" (UID: "88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7"). InnerVolumeSpecName "nova-extra-config-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.074089 4769 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.074127 4769 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.074144 4769 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.074162 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6dfvx\" (UniqueName: \"kubernetes.io/projected/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-kube-api-access-6dfvx\") on node \"crc\" DevicePath \"\"" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.074180 4769 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.074196 4769 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.074213 4769 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.074229 4769 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.125514 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7" (UID: "88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7"). InnerVolumeSpecName "nova-migration-ssh-key-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.175829 4769 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.369314 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zms" event={"ID":"88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7","Type":"ContainerDied","Data":"0dcc980c4e3b6be926d8b091aa0f7af32d43366d073f2ae2685f911a00019979"} Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.370664 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0dcc980c4e3b6be926d8b091aa0f7af32d43366d073f2ae2685f911a00019979" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.369498 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zms" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.480150 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx"] Nov 25 10:33:03 crc kubenswrapper[4769]: E1125 10:33:03.480914 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.480932 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.481161 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.482118 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.484094 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.484192 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-skqtv" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.485086 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.485247 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.487504 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.509583 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx"] Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.582724 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eef2221d-aa15-41d7-bb96-d2206eef00fb-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx\" (UID: \"eef2221d-aa15-41d7-bb96-d2206eef00fb\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.583037 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/eef2221d-aa15-41d7-bb96-d2206eef00fb-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx\" (UID: \"eef2221d-aa15-41d7-bb96-d2206eef00fb\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.583472 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/eef2221d-aa15-41d7-bb96-d2206eef00fb-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx\" (UID: \"eef2221d-aa15-41d7-bb96-d2206eef00fb\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.583773 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hjws7\" (UniqueName: \"kubernetes.io/projected/eef2221d-aa15-41d7-bb96-d2206eef00fb-kube-api-access-hjws7\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx\" (UID: \"eef2221d-aa15-41d7-bb96-d2206eef00fb\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.583798 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/eef2221d-aa15-41d7-bb96-d2206eef00fb-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx\" (UID: \"eef2221d-aa15-41d7-bb96-d2206eef00fb\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx" Nov 25 10:33:03 crc 
kubenswrapper[4769]: I1125 10:33:03.583830 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/eef2221d-aa15-41d7-bb96-d2206eef00fb-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx\" (UID: \"eef2221d-aa15-41d7-bb96-d2206eef00fb\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.583903 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/eef2221d-aa15-41d7-bb96-d2206eef00fb-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx\" (UID: \"eef2221d-aa15-41d7-bb96-d2206eef00fb\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.684922 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eef2221d-aa15-41d7-bb96-d2206eef00fb-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx\" (UID: \"eef2221d-aa15-41d7-bb96-d2206eef00fb\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.685041 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/eef2221d-aa15-41d7-bb96-d2206eef00fb-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx\" (UID: \"eef2221d-aa15-41d7-bb96-d2206eef00fb\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.685078 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/eef2221d-aa15-41d7-bb96-d2206eef00fb-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx\" (UID: \"eef2221d-aa15-41d7-bb96-d2206eef00fb\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.685215 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hjws7\" (UniqueName: \"kubernetes.io/projected/eef2221d-aa15-41d7-bb96-d2206eef00fb-kube-api-access-hjws7\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx\" (UID: \"eef2221d-aa15-41d7-bb96-d2206eef00fb\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.685242 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/eef2221d-aa15-41d7-bb96-d2206eef00fb-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx\" (UID: \"eef2221d-aa15-41d7-bb96-d2206eef00fb\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.685267 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/eef2221d-aa15-41d7-bb96-d2206eef00fb-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx\" (UID: \"eef2221d-aa15-41d7-bb96-d2206eef00fb\") " 
pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.685313 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/eef2221d-aa15-41d7-bb96-d2206eef00fb-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx\" (UID: \"eef2221d-aa15-41d7-bb96-d2206eef00fb\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.691950 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/eef2221d-aa15-41d7-bb96-d2206eef00fb-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx\" (UID: \"eef2221d-aa15-41d7-bb96-d2206eef00fb\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.698417 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/eef2221d-aa15-41d7-bb96-d2206eef00fb-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx\" (UID: \"eef2221d-aa15-41d7-bb96-d2206eef00fb\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.782268 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/eef2221d-aa15-41d7-bb96-d2206eef00fb-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx\" (UID: \"eef2221d-aa15-41d7-bb96-d2206eef00fb\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.782360 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eef2221d-aa15-41d7-bb96-d2206eef00fb-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx\" (UID: \"eef2221d-aa15-41d7-bb96-d2206eef00fb\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.786556 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hjws7\" (UniqueName: \"kubernetes.io/projected/eef2221d-aa15-41d7-bb96-d2206eef00fb-kube-api-access-hjws7\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx\" (UID: \"eef2221d-aa15-41d7-bb96-d2206eef00fb\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.787490 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/eef2221d-aa15-41d7-bb96-d2206eef00fb-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx\" (UID: \"eef2221d-aa15-41d7-bb96-d2206eef00fb\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.788627 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/eef2221d-aa15-41d7-bb96-d2206eef00fb-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx\" (UID: 
\"eef2221d-aa15-41d7-bb96-d2206eef00fb\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx" Nov 25 10:33:03 crc kubenswrapper[4769]: I1125 10:33:03.802252 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx" Nov 25 10:33:04 crc kubenswrapper[4769]: I1125 10:33:04.372744 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx"] Nov 25 10:33:04 crc kubenswrapper[4769]: I1125 10:33:04.383489 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx" event={"ID":"eef2221d-aa15-41d7-bb96-d2206eef00fb","Type":"ContainerStarted","Data":"adfe8c57a1cab3a6fb7ac6c77dee520271d3075bb6018f0696bc659611747ef1"} Nov 25 10:33:04 crc kubenswrapper[4769]: I1125 10:33:04.385831 4769 generic.go:334] "Generic (PLEG): container finished" podID="eb9a18c2-02cc-4ee0-8a59-5fb6fbbc424e" containerID="13ff7561c326b4474f352d9429fac962377a13cd5c08cd00649fac558fcf83ab" exitCode=0 Nov 25 10:33:04 crc kubenswrapper[4769]: I1125 10:33:04.385872 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-426q9" event={"ID":"eb9a18c2-02cc-4ee0-8a59-5fb6fbbc424e","Type":"ContainerDied","Data":"13ff7561c326b4474f352d9429fac962377a13cd5c08cd00649fac558fcf83ab"} Nov 25 10:33:07 crc kubenswrapper[4769]: I1125 10:33:07.427529 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-426q9" event={"ID":"eb9a18c2-02cc-4ee0-8a59-5fb6fbbc424e","Type":"ContainerStarted","Data":"a216407b3f3aa22a2f670dbe31cc4666e4ecfe4967599c73d3e5ac5c9b2fd265"} Nov 25 10:33:07 crc kubenswrapper[4769]: I1125 10:33:07.430662 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx" event={"ID":"eef2221d-aa15-41d7-bb96-d2206eef00fb","Type":"ContainerStarted","Data":"2e0760ddff903d0618ac979891dcebefc9b0e2c59c6294f9a186b50d9b94b208"} Nov 25 10:33:07 crc kubenswrapper[4769]: I1125 10:33:07.457376 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-426q9" podStartSLOduration=3.398175405 podStartE2EDuration="15.45734665s" podCreationTimestamp="2025-11-25 10:32:52 +0000 UTC" firstStartedPulling="2025-11-25 10:32:54.252529628 +0000 UTC m=+2922.837501941" lastFinishedPulling="2025-11-25 10:33:06.311700873 +0000 UTC m=+2934.896673186" observedRunningTime="2025-11-25 10:33:07.449488079 +0000 UTC m=+2936.034460432" watchObservedRunningTime="2025-11-25 10:33:07.45734665 +0000 UTC m=+2936.042319003" Nov 25 10:33:07 crc kubenswrapper[4769]: I1125 10:33:07.477781 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx" podStartSLOduration=2.428875388 podStartE2EDuration="4.477752458s" podCreationTimestamp="2025-11-25 10:33:03 +0000 UTC" firstStartedPulling="2025-11-25 10:33:04.374734986 +0000 UTC m=+2932.959707299" lastFinishedPulling="2025-11-25 10:33:06.423612046 +0000 UTC m=+2935.008584369" observedRunningTime="2025-11-25 10:33:07.468586962 +0000 UTC m=+2936.053559285" watchObservedRunningTime="2025-11-25 10:33:07.477752458 +0000 UTC m=+2936.062724801" Nov 25 10:33:13 crc kubenswrapper[4769]: I1125 10:33:13.120177 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-426q9" Nov 25 
10:33:13 crc kubenswrapper[4769]: I1125 10:33:13.120510 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-426q9" Nov 25 10:33:13 crc kubenswrapper[4769]: I1125 10:33:13.179945 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-426q9" Nov 25 10:33:13 crc kubenswrapper[4769]: I1125 10:33:13.555370 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-426q9" Nov 25 10:33:13 crc kubenswrapper[4769]: I1125 10:33:13.667041 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-426q9"] Nov 25 10:33:13 crc kubenswrapper[4769]: I1125 10:33:13.782846 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-lfhp2"] Nov 25 10:33:13 crc kubenswrapper[4769]: I1125 10:33:13.792454 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-lfhp2" podUID="cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9" containerName="registry-server" containerID="cri-o://bdb4d39d436d54e0a6e5fd318c207a277a3cd25705a90a36a867724a63110bc4" gracePeriod=2 Nov 25 10:33:14 crc kubenswrapper[4769]: I1125 10:33:14.515900 4769 generic.go:334] "Generic (PLEG): container finished" podID="cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9" containerID="bdb4d39d436d54e0a6e5fd318c207a277a3cd25705a90a36a867724a63110bc4" exitCode=0 Nov 25 10:33:14 crc kubenswrapper[4769]: I1125 10:33:14.516022 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lfhp2" event={"ID":"cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9","Type":"ContainerDied","Data":"bdb4d39d436d54e0a6e5fd318c207a277a3cd25705a90a36a867724a63110bc4"} Nov 25 10:33:14 crc kubenswrapper[4769]: I1125 10:33:14.873530 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lfhp2" Nov 25 10:33:14 crc kubenswrapper[4769]: I1125 10:33:14.898636 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4vrq\" (UniqueName: \"kubernetes.io/projected/cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9-kube-api-access-d4vrq\") pod \"cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9\" (UID: \"cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9\") " Nov 25 10:33:14 crc kubenswrapper[4769]: I1125 10:33:14.899271 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9-utilities\") pod \"cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9\" (UID: \"cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9\") " Nov 25 10:33:14 crc kubenswrapper[4769]: I1125 10:33:14.899616 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9-catalog-content\") pod \"cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9\" (UID: \"cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9\") " Nov 25 10:33:14 crc kubenswrapper[4769]: I1125 10:33:14.899695 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9-utilities" (OuterVolumeSpecName: "utilities") pod "cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9" (UID: "cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:33:14 crc kubenswrapper[4769]: I1125 10:33:14.901075 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:33:14 crc kubenswrapper[4769]: I1125 10:33:14.924576 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9-kube-api-access-d4vrq" (OuterVolumeSpecName: "kube-api-access-d4vrq") pod "cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9" (UID: "cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9"). InnerVolumeSpecName "kube-api-access-d4vrq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:33:14 crc kubenswrapper[4769]: I1125 10:33:14.996986 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9" (UID: "cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:33:15 crc kubenswrapper[4769]: I1125 10:33:15.004067 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4vrq\" (UniqueName: \"kubernetes.io/projected/cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9-kube-api-access-d4vrq\") on node \"crc\" DevicePath \"\"" Nov 25 10:33:15 crc kubenswrapper[4769]: I1125 10:33:15.004110 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:33:15 crc kubenswrapper[4769]: I1125 10:33:15.530202 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-lfhp2" Nov 25 10:33:15 crc kubenswrapper[4769]: I1125 10:33:15.540849 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lfhp2" event={"ID":"cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9","Type":"ContainerDied","Data":"b139317a32f9848714df4eb5058e8f938259eb8f935188b3d5449c1c8bb5b0b6"} Nov 25 10:33:15 crc kubenswrapper[4769]: I1125 10:33:15.540949 4769 scope.go:117] "RemoveContainer" containerID="bdb4d39d436d54e0a6e5fd318c207a277a3cd25705a90a36a867724a63110bc4" Nov 25 10:33:15 crc kubenswrapper[4769]: I1125 10:33:15.581908 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-lfhp2"] Nov 25 10:33:15 crc kubenswrapper[4769]: I1125 10:33:15.583114 4769 scope.go:117] "RemoveContainer" containerID="ed979bd7fe6d53bbaf385aaa5f86e45932ed2fbbb0a03d68b1669dfe2f9bbb46" Nov 25 10:33:15 crc kubenswrapper[4769]: I1125 10:33:15.596819 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-lfhp2"] Nov 25 10:33:15 crc kubenswrapper[4769]: I1125 10:33:15.618213 4769 scope.go:117] "RemoveContainer" containerID="64ace453db6ff165529c2ad106e90c1e2294bdcb16367bdb037eadaad2360688" Nov 25 10:33:16 crc kubenswrapper[4769]: I1125 10:33:16.284617 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9" path="/var/lib/kubelet/pods/cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9/volumes" Nov 25 10:33:22 crc kubenswrapper[4769]: I1125 10:33:22.299209 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:33:22 crc kubenswrapper[4769]: I1125 10:33:22.299691 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:33:50 crc kubenswrapper[4769]: I1125 10:33:50.746121 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-wcz8m"] Nov 25 10:33:50 crc kubenswrapper[4769]: E1125 10:33:50.748007 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9" containerName="extract-content" Nov 25 10:33:50 crc kubenswrapper[4769]: I1125 10:33:50.748038 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9" containerName="extract-content" Nov 25 10:33:50 crc kubenswrapper[4769]: E1125 10:33:50.748083 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9" containerName="extract-utilities" Nov 25 10:33:50 crc kubenswrapper[4769]: I1125 10:33:50.748104 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9" containerName="extract-utilities" Nov 25 10:33:50 crc kubenswrapper[4769]: E1125 10:33:50.748211 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9" containerName="registry-server" Nov 25 10:33:50 crc kubenswrapper[4769]: I1125 10:33:50.748232 4769 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9" containerName="registry-server" Nov 25 10:33:50 crc kubenswrapper[4769]: I1125 10:33:50.748847 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="cbb2033d-5fd4-4e6f-8a2c-819ce8b5deb9" containerName="registry-server" Nov 25 10:33:50 crc kubenswrapper[4769]: I1125 10:33:50.752868 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wcz8m" Nov 25 10:33:50 crc kubenswrapper[4769]: I1125 10:33:50.756002 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-wcz8m"] Nov 25 10:33:50 crc kubenswrapper[4769]: I1125 10:33:50.889390 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c6b2991-79c5-48d4-bf12-b302916e5e54-utilities\") pod \"redhat-marketplace-wcz8m\" (UID: \"2c6b2991-79c5-48d4-bf12-b302916e5e54\") " pod="openshift-marketplace/redhat-marketplace-wcz8m" Nov 25 10:33:50 crc kubenswrapper[4769]: I1125 10:33:50.889794 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7dxxc\" (UniqueName: \"kubernetes.io/projected/2c6b2991-79c5-48d4-bf12-b302916e5e54-kube-api-access-7dxxc\") pod \"redhat-marketplace-wcz8m\" (UID: \"2c6b2991-79c5-48d4-bf12-b302916e5e54\") " pod="openshift-marketplace/redhat-marketplace-wcz8m" Nov 25 10:33:50 crc kubenswrapper[4769]: I1125 10:33:50.889847 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c6b2991-79c5-48d4-bf12-b302916e5e54-catalog-content\") pod \"redhat-marketplace-wcz8m\" (UID: \"2c6b2991-79c5-48d4-bf12-b302916e5e54\") " pod="openshift-marketplace/redhat-marketplace-wcz8m" Nov 25 10:33:50 crc kubenswrapper[4769]: I1125 10:33:50.992278 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c6b2991-79c5-48d4-bf12-b302916e5e54-catalog-content\") pod \"redhat-marketplace-wcz8m\" (UID: \"2c6b2991-79c5-48d4-bf12-b302916e5e54\") " pod="openshift-marketplace/redhat-marketplace-wcz8m" Nov 25 10:33:50 crc kubenswrapper[4769]: I1125 10:33:50.992670 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c6b2991-79c5-48d4-bf12-b302916e5e54-utilities\") pod \"redhat-marketplace-wcz8m\" (UID: \"2c6b2991-79c5-48d4-bf12-b302916e5e54\") " pod="openshift-marketplace/redhat-marketplace-wcz8m" Nov 25 10:33:50 crc kubenswrapper[4769]: I1125 10:33:50.992736 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c6b2991-79c5-48d4-bf12-b302916e5e54-catalog-content\") pod \"redhat-marketplace-wcz8m\" (UID: \"2c6b2991-79c5-48d4-bf12-b302916e5e54\") " pod="openshift-marketplace/redhat-marketplace-wcz8m" Nov 25 10:33:50 crc kubenswrapper[4769]: I1125 10:33:50.992772 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7dxxc\" (UniqueName: \"kubernetes.io/projected/2c6b2991-79c5-48d4-bf12-b302916e5e54-kube-api-access-7dxxc\") pod \"redhat-marketplace-wcz8m\" (UID: \"2c6b2991-79c5-48d4-bf12-b302916e5e54\") " pod="openshift-marketplace/redhat-marketplace-wcz8m" Nov 25 10:33:50 crc kubenswrapper[4769]: I1125 10:33:50.993620 4769 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c6b2991-79c5-48d4-bf12-b302916e5e54-utilities\") pod \"redhat-marketplace-wcz8m\" (UID: \"2c6b2991-79c5-48d4-bf12-b302916e5e54\") " pod="openshift-marketplace/redhat-marketplace-wcz8m" Nov 25 10:33:51 crc kubenswrapper[4769]: I1125 10:33:51.015869 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7dxxc\" (UniqueName: \"kubernetes.io/projected/2c6b2991-79c5-48d4-bf12-b302916e5e54-kube-api-access-7dxxc\") pod \"redhat-marketplace-wcz8m\" (UID: \"2c6b2991-79c5-48d4-bf12-b302916e5e54\") " pod="openshift-marketplace/redhat-marketplace-wcz8m" Nov 25 10:33:51 crc kubenswrapper[4769]: I1125 10:33:51.084639 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wcz8m" Nov 25 10:33:51 crc kubenswrapper[4769]: I1125 10:33:51.607399 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-wcz8m"] Nov 25 10:33:51 crc kubenswrapper[4769]: I1125 10:33:51.982347 4769 generic.go:334] "Generic (PLEG): container finished" podID="2c6b2991-79c5-48d4-bf12-b302916e5e54" containerID="d766b2a93a14ed7f3ca74cdb69090f52adfad5633db04c109862a148e555b981" exitCode=0 Nov 25 10:33:51 crc kubenswrapper[4769]: I1125 10:33:51.982538 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wcz8m" event={"ID":"2c6b2991-79c5-48d4-bf12-b302916e5e54","Type":"ContainerDied","Data":"d766b2a93a14ed7f3ca74cdb69090f52adfad5633db04c109862a148e555b981"} Nov 25 10:33:51 crc kubenswrapper[4769]: I1125 10:33:51.982664 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wcz8m" event={"ID":"2c6b2991-79c5-48d4-bf12-b302916e5e54","Type":"ContainerStarted","Data":"9fdb5a590ca97259bce7f0d0cb399ffea7dcab9d7103346db12dbd344fa8fbb7"} Nov 25 10:33:52 crc kubenswrapper[4769]: I1125 10:33:52.290749 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:33:52 crc kubenswrapper[4769]: I1125 10:33:52.291013 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:33:52 crc kubenswrapper[4769]: I1125 10:33:52.291112 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" Nov 25 10:33:52 crc kubenswrapper[4769]: I1125 10:33:52.292167 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ca58e4115a8ae8db9ff23948bf8aee779f500aa2408587bba8cdb927db89acf0"} pod="openshift-machine-config-operator/machine-config-daemon-98mzt" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 10:33:52 crc kubenswrapper[4769]: I1125 10:33:52.292297 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" 
podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" containerID="cri-o://ca58e4115a8ae8db9ff23948bf8aee779f500aa2408587bba8cdb927db89acf0" gracePeriod=600 Nov 25 10:33:52 crc kubenswrapper[4769]: E1125 10:33:52.413816 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:33:53 crc kubenswrapper[4769]: I1125 10:33:53.002760 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wcz8m" event={"ID":"2c6b2991-79c5-48d4-bf12-b302916e5e54","Type":"ContainerStarted","Data":"537df45f8c7683d8d965414d9f4860460fbd9325e4b3fb610dea3521108047ca"} Nov 25 10:33:53 crc kubenswrapper[4769]: I1125 10:33:53.006787 4769 generic.go:334] "Generic (PLEG): container finished" podID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerID="ca58e4115a8ae8db9ff23948bf8aee779f500aa2408587bba8cdb927db89acf0" exitCode=0 Nov 25 10:33:53 crc kubenswrapper[4769]: I1125 10:33:53.006831 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" event={"ID":"d58c71b5-5dc4-45c1-9b58-9740a35d2256","Type":"ContainerDied","Data":"ca58e4115a8ae8db9ff23948bf8aee779f500aa2408587bba8cdb927db89acf0"} Nov 25 10:33:53 crc kubenswrapper[4769]: I1125 10:33:53.006866 4769 scope.go:117] "RemoveContainer" containerID="76c39a79fa79eb13861766be39df967f4ca3f6d724001859f3da0abc94b4b541" Nov 25 10:33:53 crc kubenswrapper[4769]: I1125 10:33:53.007785 4769 scope.go:117] "RemoveContainer" containerID="ca58e4115a8ae8db9ff23948bf8aee779f500aa2408587bba8cdb927db89acf0" Nov 25 10:33:53 crc kubenswrapper[4769]: E1125 10:33:53.008250 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:33:54 crc kubenswrapper[4769]: I1125 10:33:54.018297 4769 generic.go:334] "Generic (PLEG): container finished" podID="2c6b2991-79c5-48d4-bf12-b302916e5e54" containerID="537df45f8c7683d8d965414d9f4860460fbd9325e4b3fb610dea3521108047ca" exitCode=0 Nov 25 10:33:54 crc kubenswrapper[4769]: I1125 10:33:54.018488 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wcz8m" event={"ID":"2c6b2991-79c5-48d4-bf12-b302916e5e54","Type":"ContainerDied","Data":"537df45f8c7683d8d965414d9f4860460fbd9325e4b3fb610dea3521108047ca"} Nov 25 10:33:55 crc kubenswrapper[4769]: I1125 10:33:55.032367 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wcz8m" event={"ID":"2c6b2991-79c5-48d4-bf12-b302916e5e54","Type":"ContainerStarted","Data":"7a630a2ee61dec845b95a67e4d8a3694d1418384302d2423b3e3cd78e6c72da3"} Nov 25 10:33:55 crc kubenswrapper[4769]: I1125 10:33:55.054579 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-wcz8m" 
podStartSLOduration=2.563636972 podStartE2EDuration="5.054559091s" podCreationTimestamp="2025-11-25 10:33:50 +0000 UTC" firstStartedPulling="2025-11-25 10:33:51.984293493 +0000 UTC m=+2980.569265806" lastFinishedPulling="2025-11-25 10:33:54.475215612 +0000 UTC m=+2983.060187925" observedRunningTime="2025-11-25 10:33:55.046707322 +0000 UTC m=+2983.631679635" watchObservedRunningTime="2025-11-25 10:33:55.054559091 +0000 UTC m=+2983.639531404" Nov 25 10:34:01 crc kubenswrapper[4769]: I1125 10:34:01.085290 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-wcz8m" Nov 25 10:34:01 crc kubenswrapper[4769]: I1125 10:34:01.085885 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-wcz8m" Nov 25 10:34:01 crc kubenswrapper[4769]: I1125 10:34:01.165038 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-wcz8m" Nov 25 10:34:01 crc kubenswrapper[4769]: I1125 10:34:01.252732 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-wcz8m" Nov 25 10:34:01 crc kubenswrapper[4769]: I1125 10:34:01.404106 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-wcz8m"] Nov 25 10:34:03 crc kubenswrapper[4769]: I1125 10:34:03.116122 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-wcz8m" podUID="2c6b2991-79c5-48d4-bf12-b302916e5e54" containerName="registry-server" containerID="cri-o://7a630a2ee61dec845b95a67e4d8a3694d1418384302d2423b3e3cd78e6c72da3" gracePeriod=2 Nov 25 10:34:03 crc kubenswrapper[4769]: I1125 10:34:03.237692 4769 scope.go:117] "RemoveContainer" containerID="ca58e4115a8ae8db9ff23948bf8aee779f500aa2408587bba8cdb927db89acf0" Nov 25 10:34:03 crc kubenswrapper[4769]: E1125 10:34:03.238061 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:34:03 crc kubenswrapper[4769]: I1125 10:34:03.660568 4769 util.go:48] "No ready sandbox for pod can be found. 
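
Note: the pod_startup_latency_tracker entry above decomposes as: podStartE2EDuration = watchObservedRunningTime − podCreationTimestamp, and podStartSLOduration = E2E minus the image-pull window (lastFinishedPulling − firstStartedPulling). Reproducing the arithmetic from the timestamps in that entry (the layout string is an assumption about the printed format):

package main

import (
	"fmt"
	"time"
)

func main() {
	// Timestamps copied from the pod_startup_latency_tracker entry above.
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	parse := func(s string) time.Time {
		t, err := time.Parse(layout, s)
		if err != nil {
			panic(err)
		}
		return t
	}
	created := parse("2025-11-25 10:33:50 +0000 UTC")
	firstPull := parse("2025-11-25 10:33:51.984293493 +0000 UTC")
	lastPull := parse("2025-11-25 10:33:54.475215612 +0000 UTC")
	observed := parse("2025-11-25 10:33:55.054559091 +0000 UTC")

	e2e := observed.Sub(created)         // podStartE2EDuration: 5.054559091s
	slo := e2e - lastPull.Sub(firstPull) // minus the image-pull window
	fmt.Println(e2e, slo)                // 5.054559091s 2.563636972s
}
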
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wcz8m" Nov 25 10:34:03 crc kubenswrapper[4769]: I1125 10:34:03.822256 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7dxxc\" (UniqueName: \"kubernetes.io/projected/2c6b2991-79c5-48d4-bf12-b302916e5e54-kube-api-access-7dxxc\") pod \"2c6b2991-79c5-48d4-bf12-b302916e5e54\" (UID: \"2c6b2991-79c5-48d4-bf12-b302916e5e54\") " Nov 25 10:34:03 crc kubenswrapper[4769]: I1125 10:34:03.822495 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c6b2991-79c5-48d4-bf12-b302916e5e54-utilities\") pod \"2c6b2991-79c5-48d4-bf12-b302916e5e54\" (UID: \"2c6b2991-79c5-48d4-bf12-b302916e5e54\") " Nov 25 10:34:03 crc kubenswrapper[4769]: I1125 10:34:03.822552 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c6b2991-79c5-48d4-bf12-b302916e5e54-catalog-content\") pod \"2c6b2991-79c5-48d4-bf12-b302916e5e54\" (UID: \"2c6b2991-79c5-48d4-bf12-b302916e5e54\") " Nov 25 10:34:03 crc kubenswrapper[4769]: I1125 10:34:03.823941 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2c6b2991-79c5-48d4-bf12-b302916e5e54-utilities" (OuterVolumeSpecName: "utilities") pod "2c6b2991-79c5-48d4-bf12-b302916e5e54" (UID: "2c6b2991-79c5-48d4-bf12-b302916e5e54"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:34:03 crc kubenswrapper[4769]: I1125 10:34:03.830431 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c6b2991-79c5-48d4-bf12-b302916e5e54-kube-api-access-7dxxc" (OuterVolumeSpecName: "kube-api-access-7dxxc") pod "2c6b2991-79c5-48d4-bf12-b302916e5e54" (UID: "2c6b2991-79c5-48d4-bf12-b302916e5e54"). InnerVolumeSpecName "kube-api-access-7dxxc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:34:03 crc kubenswrapper[4769]: I1125 10:34:03.853427 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2c6b2991-79c5-48d4-bf12-b302916e5e54-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2c6b2991-79c5-48d4-bf12-b302916e5e54" (UID: "2c6b2991-79c5-48d4-bf12-b302916e5e54"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:34:03 crc kubenswrapper[4769]: I1125 10:34:03.926617 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7dxxc\" (UniqueName: \"kubernetes.io/projected/2c6b2991-79c5-48d4-bf12-b302916e5e54-kube-api-access-7dxxc\") on node \"crc\" DevicePath \"\"" Nov 25 10:34:03 crc kubenswrapper[4769]: I1125 10:34:03.926678 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c6b2991-79c5-48d4-bf12-b302916e5e54-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:34:03 crc kubenswrapper[4769]: I1125 10:34:03.926696 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c6b2991-79c5-48d4-bf12-b302916e5e54-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:34:04 crc kubenswrapper[4769]: I1125 10:34:04.132422 4769 generic.go:334] "Generic (PLEG): container finished" podID="2c6b2991-79c5-48d4-bf12-b302916e5e54" containerID="7a630a2ee61dec845b95a67e4d8a3694d1418384302d2423b3e3cd78e6c72da3" exitCode=0 Nov 25 10:34:04 crc kubenswrapper[4769]: I1125 10:34:04.132489 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wcz8m" event={"ID":"2c6b2991-79c5-48d4-bf12-b302916e5e54","Type":"ContainerDied","Data":"7a630a2ee61dec845b95a67e4d8a3694d1418384302d2423b3e3cd78e6c72da3"} Nov 25 10:34:04 crc kubenswrapper[4769]: I1125 10:34:04.132530 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wcz8m" event={"ID":"2c6b2991-79c5-48d4-bf12-b302916e5e54","Type":"ContainerDied","Data":"9fdb5a590ca97259bce7f0d0cb399ffea7dcab9d7103346db12dbd344fa8fbb7"} Nov 25 10:34:04 crc kubenswrapper[4769]: I1125 10:34:04.132555 4769 scope.go:117] "RemoveContainer" containerID="7a630a2ee61dec845b95a67e4d8a3694d1418384302d2423b3e3cd78e6c72da3" Nov 25 10:34:04 crc kubenswrapper[4769]: I1125 10:34:04.133896 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wcz8m" Nov 25 10:34:04 crc kubenswrapper[4769]: I1125 10:34:04.160164 4769 scope.go:117] "RemoveContainer" containerID="537df45f8c7683d8d965414d9f4860460fbd9325e4b3fb610dea3521108047ca" Nov 25 10:34:04 crc kubenswrapper[4769]: I1125 10:34:04.190011 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-wcz8m"] Nov 25 10:34:04 crc kubenswrapper[4769]: I1125 10:34:04.198128 4769 scope.go:117] "RemoveContainer" containerID="d766b2a93a14ed7f3ca74cdb69090f52adfad5633db04c109862a148e555b981" Nov 25 10:34:04 crc kubenswrapper[4769]: I1125 10:34:04.207927 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-wcz8m"] Nov 25 10:34:04 crc kubenswrapper[4769]: I1125 10:34:04.256798 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c6b2991-79c5-48d4-bf12-b302916e5e54" path="/var/lib/kubelet/pods/2c6b2991-79c5-48d4-bf12-b302916e5e54/volumes" Nov 25 10:34:04 crc kubenswrapper[4769]: I1125 10:34:04.274822 4769 scope.go:117] "RemoveContainer" containerID="7a630a2ee61dec845b95a67e4d8a3694d1418384302d2423b3e3cd78e6c72da3" Nov 25 10:34:04 crc kubenswrapper[4769]: E1125 10:34:04.275507 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7a630a2ee61dec845b95a67e4d8a3694d1418384302d2423b3e3cd78e6c72da3\": container with ID starting with 7a630a2ee61dec845b95a67e4d8a3694d1418384302d2423b3e3cd78e6c72da3 not found: ID does not exist" containerID="7a630a2ee61dec845b95a67e4d8a3694d1418384302d2423b3e3cd78e6c72da3" Nov 25 10:34:04 crc kubenswrapper[4769]: I1125 10:34:04.275555 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7a630a2ee61dec845b95a67e4d8a3694d1418384302d2423b3e3cd78e6c72da3"} err="failed to get container status \"7a630a2ee61dec845b95a67e4d8a3694d1418384302d2423b3e3cd78e6c72da3\": rpc error: code = NotFound desc = could not find container \"7a630a2ee61dec845b95a67e4d8a3694d1418384302d2423b3e3cd78e6c72da3\": container with ID starting with 7a630a2ee61dec845b95a67e4d8a3694d1418384302d2423b3e3cd78e6c72da3 not found: ID does not exist" Nov 25 10:34:04 crc kubenswrapper[4769]: I1125 10:34:04.275580 4769 scope.go:117] "RemoveContainer" containerID="537df45f8c7683d8d965414d9f4860460fbd9325e4b3fb610dea3521108047ca" Nov 25 10:34:04 crc kubenswrapper[4769]: E1125 10:34:04.275931 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"537df45f8c7683d8d965414d9f4860460fbd9325e4b3fb610dea3521108047ca\": container with ID starting with 537df45f8c7683d8d965414d9f4860460fbd9325e4b3fb610dea3521108047ca not found: ID does not exist" containerID="537df45f8c7683d8d965414d9f4860460fbd9325e4b3fb610dea3521108047ca" Nov 25 10:34:04 crc kubenswrapper[4769]: I1125 10:34:04.275982 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"537df45f8c7683d8d965414d9f4860460fbd9325e4b3fb610dea3521108047ca"} err="failed to get container status \"537df45f8c7683d8d965414d9f4860460fbd9325e4b3fb610dea3521108047ca\": rpc error: code = NotFound desc = could not find container \"537df45f8c7683d8d965414d9f4860460fbd9325e4b3fb610dea3521108047ca\": container with ID starting with 537df45f8c7683d8d965414d9f4860460fbd9325e4b3fb610dea3521108047ca not found: ID does not exist" Nov 25 10:34:04 crc kubenswrapper[4769]: I1125 
10:34:04.276013 4769 scope.go:117] "RemoveContainer" containerID="d766b2a93a14ed7f3ca74cdb69090f52adfad5633db04c109862a148e555b981" Nov 25 10:34:04 crc kubenswrapper[4769]: E1125 10:34:04.276421 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d766b2a93a14ed7f3ca74cdb69090f52adfad5633db04c109862a148e555b981\": container with ID starting with d766b2a93a14ed7f3ca74cdb69090f52adfad5633db04c109862a148e555b981 not found: ID does not exist" containerID="d766b2a93a14ed7f3ca74cdb69090f52adfad5633db04c109862a148e555b981" Nov 25 10:34:04 crc kubenswrapper[4769]: I1125 10:34:04.276464 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d766b2a93a14ed7f3ca74cdb69090f52adfad5633db04c109862a148e555b981"} err="failed to get container status \"d766b2a93a14ed7f3ca74cdb69090f52adfad5633db04c109862a148e555b981\": rpc error: code = NotFound desc = could not find container \"d766b2a93a14ed7f3ca74cdb69090f52adfad5633db04c109862a148e555b981\": container with ID starting with d766b2a93a14ed7f3ca74cdb69090f52adfad5633db04c109862a148e555b981 not found: ID does not exist" Nov 25 10:34:14 crc kubenswrapper[4769]: I1125 10:34:14.242698 4769 scope.go:117] "RemoveContainer" containerID="ca58e4115a8ae8db9ff23948bf8aee779f500aa2408587bba8cdb927db89acf0" Nov 25 10:34:14 crc kubenswrapper[4769]: E1125 10:34:14.245694 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:34:28 crc kubenswrapper[4769]: I1125 10:34:28.237722 4769 scope.go:117] "RemoveContainer" containerID="ca58e4115a8ae8db9ff23948bf8aee779f500aa2408587bba8cdb927db89acf0" Nov 25 10:34:28 crc kubenswrapper[4769]: E1125 10:34:28.240246 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:34:39 crc kubenswrapper[4769]: I1125 10:34:39.237748 4769 scope.go:117] "RemoveContainer" containerID="ca58e4115a8ae8db9ff23948bf8aee779f500aa2408587bba8cdb927db89acf0" Nov 25 10:34:39 crc kubenswrapper[4769]: E1125 10:34:39.238576 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:34:50 crc kubenswrapper[4769]: I1125 10:34:50.237452 4769 scope.go:117] "RemoveContainer" containerID="ca58e4115a8ae8db9ff23948bf8aee779f500aa2408587bba8cdb927db89acf0" Nov 25 10:34:50 crc kubenswrapper[4769]: E1125 10:34:50.238568 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
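
Note: the "ContainerStatus from runtime service failed ... NotFound" errors above are a benign race — RemoveContainer already deleted the container, so the follow-up status query finds nothing and the deletor just logs it and moves on. A sketch of the tolerant check, using the real google.golang.org/grpc/status API:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// alreadyGone reports whether a CRI error just means the container was
// removed before we asked; the log above shows this being tolerated
// rather than treated as a sync failure.
func alreadyGone(err error) bool {
	return status.Code(err) == codes.NotFound
}

func main() {
	err := status.Error(codes.NotFound, "could not find container")
	fmt.Println(alreadyGone(err)) // true: safe to treat the delete as done
}
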
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:35:01 crc kubenswrapper[4769]: I1125 10:35:01.237620 4769 scope.go:117] "RemoveContainer" containerID="ca58e4115a8ae8db9ff23948bf8aee779f500aa2408587bba8cdb927db89acf0" Nov 25 10:35:01 crc kubenswrapper[4769]: E1125 10:35:01.238684 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:35:14 crc kubenswrapper[4769]: I1125 10:35:14.242059 4769 scope.go:117] "RemoveContainer" containerID="ca58e4115a8ae8db9ff23948bf8aee779f500aa2408587bba8cdb927db89acf0" Nov 25 10:35:14 crc kubenswrapper[4769]: E1125 10:35:14.243300 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:35:26 crc kubenswrapper[4769]: I1125 10:35:26.237017 4769 scope.go:117] "RemoveContainer" containerID="ca58e4115a8ae8db9ff23948bf8aee779f500aa2408587bba8cdb927db89acf0" Nov 25 10:35:26 crc kubenswrapper[4769]: E1125 10:35:26.238102 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:35:31 crc kubenswrapper[4769]: I1125 10:35:31.304687 4769 generic.go:334] "Generic (PLEG): container finished" podID="eef2221d-aa15-41d7-bb96-d2206eef00fb" containerID="2e0760ddff903d0618ac979891dcebefc9b0e2c59c6294f9a186b50d9b94b208" exitCode=0 Nov 25 10:35:31 crc kubenswrapper[4769]: I1125 10:35:31.304820 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx" event={"ID":"eef2221d-aa15-41d7-bb96-d2206eef00fb","Type":"ContainerDied","Data":"2e0760ddff903d0618ac979891dcebefc9b0e2c59c6294f9a186b50d9b94b208"} Nov 25 10:35:32 crc kubenswrapper[4769]: I1125 10:35:32.830655 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx" Nov 25 10:35:32 crc kubenswrapper[4769]: I1125 10:35:32.899854 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eef2221d-aa15-41d7-bb96-d2206eef00fb-telemetry-combined-ca-bundle\") pod \"eef2221d-aa15-41d7-bb96-d2206eef00fb\" (UID: \"eef2221d-aa15-41d7-bb96-d2206eef00fb\") " Nov 25 10:35:32 crc kubenswrapper[4769]: I1125 10:35:32.900222 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/eef2221d-aa15-41d7-bb96-d2206eef00fb-ceilometer-compute-config-data-1\") pod \"eef2221d-aa15-41d7-bb96-d2206eef00fb\" (UID: \"eef2221d-aa15-41d7-bb96-d2206eef00fb\") " Nov 25 10:35:32 crc kubenswrapper[4769]: I1125 10:35:32.900293 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hjws7\" (UniqueName: \"kubernetes.io/projected/eef2221d-aa15-41d7-bb96-d2206eef00fb-kube-api-access-hjws7\") pod \"eef2221d-aa15-41d7-bb96-d2206eef00fb\" (UID: \"eef2221d-aa15-41d7-bb96-d2206eef00fb\") " Nov 25 10:35:32 crc kubenswrapper[4769]: I1125 10:35:32.900341 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/eef2221d-aa15-41d7-bb96-d2206eef00fb-ssh-key\") pod \"eef2221d-aa15-41d7-bb96-d2206eef00fb\" (UID: \"eef2221d-aa15-41d7-bb96-d2206eef00fb\") " Nov 25 10:35:32 crc kubenswrapper[4769]: I1125 10:35:32.900403 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/eef2221d-aa15-41d7-bb96-d2206eef00fb-inventory\") pod \"eef2221d-aa15-41d7-bb96-d2206eef00fb\" (UID: \"eef2221d-aa15-41d7-bb96-d2206eef00fb\") " Nov 25 10:35:32 crc kubenswrapper[4769]: I1125 10:35:32.900516 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/eef2221d-aa15-41d7-bb96-d2206eef00fb-ceilometer-compute-config-data-2\") pod \"eef2221d-aa15-41d7-bb96-d2206eef00fb\" (UID: \"eef2221d-aa15-41d7-bb96-d2206eef00fb\") " Nov 25 10:35:32 crc kubenswrapper[4769]: I1125 10:35:32.900560 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/eef2221d-aa15-41d7-bb96-d2206eef00fb-ceilometer-compute-config-data-0\") pod \"eef2221d-aa15-41d7-bb96-d2206eef00fb\" (UID: \"eef2221d-aa15-41d7-bb96-d2206eef00fb\") " Nov 25 10:35:32 crc kubenswrapper[4769]: I1125 10:35:32.947397 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eef2221d-aa15-41d7-bb96-d2206eef00fb-kube-api-access-hjws7" (OuterVolumeSpecName: "kube-api-access-hjws7") pod "eef2221d-aa15-41d7-bb96-d2206eef00fb" (UID: "eef2221d-aa15-41d7-bb96-d2206eef00fb"). InnerVolumeSpecName "kube-api-access-hjws7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:35:32 crc kubenswrapper[4769]: I1125 10:35:32.957173 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eef2221d-aa15-41d7-bb96-d2206eef00fb-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "eef2221d-aa15-41d7-bb96-d2206eef00fb" (UID: "eef2221d-aa15-41d7-bb96-d2206eef00fb"). 
InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.009144 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eef2221d-aa15-41d7-bb96-d2206eef00fb-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "eef2221d-aa15-41d7-bb96-d2206eef00fb" (UID: "eef2221d-aa15-41d7-bb96-d2206eef00fb"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.022364 4769 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/eef2221d-aa15-41d7-bb96-d2206eef00fb-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.022493 4769 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eef2221d-aa15-41d7-bb96-d2206eef00fb-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.022580 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hjws7\" (UniqueName: \"kubernetes.io/projected/eef2221d-aa15-41d7-bb96-d2206eef00fb-kube-api-access-hjws7\") on node \"crc\" DevicePath \"\"" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.079464 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eef2221d-aa15-41d7-bb96-d2206eef00fb-inventory" (OuterVolumeSpecName: "inventory") pod "eef2221d-aa15-41d7-bb96-d2206eef00fb" (UID: "eef2221d-aa15-41d7-bb96-d2206eef00fb"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.100012 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eef2221d-aa15-41d7-bb96-d2206eef00fb-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "eef2221d-aa15-41d7-bb96-d2206eef00fb" (UID: "eef2221d-aa15-41d7-bb96-d2206eef00fb"). InnerVolumeSpecName "ceilometer-compute-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.120071 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eef2221d-aa15-41d7-bb96-d2206eef00fb-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "eef2221d-aa15-41d7-bb96-d2206eef00fb" (UID: "eef2221d-aa15-41d7-bb96-d2206eef00fb"). InnerVolumeSpecName "ceilometer-compute-config-data-1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.125650 4769 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/eef2221d-aa15-41d7-bb96-d2206eef00fb-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.125680 4769 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/eef2221d-aa15-41d7-bb96-d2206eef00fb-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.125696 4769 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/eef2221d-aa15-41d7-bb96-d2206eef00fb-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\"" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.157107 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eef2221d-aa15-41d7-bb96-d2206eef00fb-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "eef2221d-aa15-41d7-bb96-d2206eef00fb" (UID: "eef2221d-aa15-41d7-bb96-d2206eef00fb"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.228204 4769 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/eef2221d-aa15-41d7-bb96-d2206eef00fb-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.330026 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.330158 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx" event={"ID":"eef2221d-aa15-41d7-bb96-d2206eef00fb","Type":"ContainerDied","Data":"adfe8c57a1cab3a6fb7ac6c77dee520271d3075bb6018f0696bc659611747ef1"} Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.330304 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="adfe8c57a1cab3a6fb7ac6c77dee520271d3075bb6018f0696bc659611747ef1" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.466539 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw"] Nov 25 10:35:33 crc kubenswrapper[4769]: E1125 10:35:33.467162 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c6b2991-79c5-48d4-bf12-b302916e5e54" containerName="extract-content" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.467183 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c6b2991-79c5-48d4-bf12-b302916e5e54" containerName="extract-content" Nov 25 10:35:33 crc kubenswrapper[4769]: E1125 10:35:33.467241 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eef2221d-aa15-41d7-bb96-d2206eef00fb" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.467251 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="eef2221d-aa15-41d7-bb96-d2206eef00fb" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 25 10:35:33 crc kubenswrapper[4769]: E1125 10:35:33.467262 4769 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="2c6b2991-79c5-48d4-bf12-b302916e5e54" containerName="registry-server" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.467272 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c6b2991-79c5-48d4-bf12-b302916e5e54" containerName="registry-server" Nov 25 10:35:33 crc kubenswrapper[4769]: E1125 10:35:33.467294 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c6b2991-79c5-48d4-bf12-b302916e5e54" containerName="extract-utilities" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.467304 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c6b2991-79c5-48d4-bf12-b302916e5e54" containerName="extract-utilities" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.467608 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="eef2221d-aa15-41d7-bb96-d2206eef00fb" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.467657 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c6b2991-79c5-48d4-bf12-b302916e5e54" containerName="registry-server" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.468622 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.471238 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-skqtv" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.471446 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.471868 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.472399 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.472688 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-ipmi-config-data" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.487316 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw"] Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.534587 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/116b0564-3e29-4358-a713-663b8f4a156a-telemetry-power-monitoring-combined-ca-bundle\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw\" (UID: \"116b0564-3e29-4358-a713-663b8f4a156a\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.534674 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/116b0564-3e29-4358-a713-663b8f4a156a-ssh-key\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw\" (UID: \"116b0564-3e29-4358-a713-663b8f4a156a\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.534935 4769 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/116b0564-3e29-4358-a713-663b8f4a156a-ceilometer-ipmi-config-data-1\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw\" (UID: \"116b0564-3e29-4358-a713-663b8f4a156a\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.535113 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/116b0564-3e29-4358-a713-663b8f4a156a-ceilometer-ipmi-config-data-2\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw\" (UID: \"116b0564-3e29-4358-a713-663b8f4a156a\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.535549 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/116b0564-3e29-4358-a713-663b8f4a156a-inventory\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw\" (UID: \"116b0564-3e29-4358-a713-663b8f4a156a\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.535695 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/116b0564-3e29-4358-a713-663b8f4a156a-ceilometer-ipmi-config-data-0\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw\" (UID: \"116b0564-3e29-4358-a713-663b8f4a156a\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.536001 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jnxbx\" (UniqueName: \"kubernetes.io/projected/116b0564-3e29-4358-a713-663b8f4a156a-kube-api-access-jnxbx\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw\" (UID: \"116b0564-3e29-4358-a713-663b8f4a156a\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.637981 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/116b0564-3e29-4358-a713-663b8f4a156a-inventory\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw\" (UID: \"116b0564-3e29-4358-a713-663b8f4a156a\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.638367 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/116b0564-3e29-4358-a713-663b8f4a156a-ceilometer-ipmi-config-data-0\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw\" (UID: \"116b0564-3e29-4358-a713-663b8f4a156a\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.638449 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jnxbx\" (UniqueName: 
\"kubernetes.io/projected/116b0564-3e29-4358-a713-663b8f4a156a-kube-api-access-jnxbx\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw\" (UID: \"116b0564-3e29-4358-a713-663b8f4a156a\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.638496 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/116b0564-3e29-4358-a713-663b8f4a156a-telemetry-power-monitoring-combined-ca-bundle\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw\" (UID: \"116b0564-3e29-4358-a713-663b8f4a156a\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.638542 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/116b0564-3e29-4358-a713-663b8f4a156a-ssh-key\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw\" (UID: \"116b0564-3e29-4358-a713-663b8f4a156a\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.638565 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/116b0564-3e29-4358-a713-663b8f4a156a-ceilometer-ipmi-config-data-1\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw\" (UID: \"116b0564-3e29-4358-a713-663b8f4a156a\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.638594 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/116b0564-3e29-4358-a713-663b8f4a156a-ceilometer-ipmi-config-data-2\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw\" (UID: \"116b0564-3e29-4358-a713-663b8f4a156a\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.643398 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/116b0564-3e29-4358-a713-663b8f4a156a-inventory\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw\" (UID: \"116b0564-3e29-4358-a713-663b8f4a156a\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.644037 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/116b0564-3e29-4358-a713-663b8f4a156a-ssh-key\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw\" (UID: \"116b0564-3e29-4358-a713-663b8f4a156a\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.644805 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/116b0564-3e29-4358-a713-663b8f4a156a-ceilometer-ipmi-config-data-1\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw\" (UID: \"116b0564-3e29-4358-a713-663b8f4a156a\") " 
pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.656459 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/116b0564-3e29-4358-a713-663b8f4a156a-telemetry-power-monitoring-combined-ca-bundle\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw\" (UID: \"116b0564-3e29-4358-a713-663b8f4a156a\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.658577 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/116b0564-3e29-4358-a713-663b8f4a156a-ceilometer-ipmi-config-data-0\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw\" (UID: \"116b0564-3e29-4358-a713-663b8f4a156a\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.660234 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/116b0564-3e29-4358-a713-663b8f4a156a-ceilometer-ipmi-config-data-2\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw\" (UID: \"116b0564-3e29-4358-a713-663b8f4a156a\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.671935 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jnxbx\" (UniqueName: \"kubernetes.io/projected/116b0564-3e29-4358-a713-663b8f4a156a-kube-api-access-jnxbx\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw\" (UID: \"116b0564-3e29-4358-a713-663b8f4a156a\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw" Nov 25 10:35:33 crc kubenswrapper[4769]: I1125 10:35:33.787546 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw" Nov 25 10:35:34 crc kubenswrapper[4769]: I1125 10:35:34.379611 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw"] Nov 25 10:35:34 crc kubenswrapper[4769]: I1125 10:35:34.384720 4769 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 10:35:35 crc kubenswrapper[4769]: I1125 10:35:35.360720 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw" event={"ID":"116b0564-3e29-4358-a713-663b8f4a156a","Type":"ContainerStarted","Data":"77ccb08c5c6582fd9b998f84bcd90494e5ec619a3d387547c1b93960576b827f"} Nov 25 10:35:35 crc kubenswrapper[4769]: I1125 10:35:35.361226 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw" event={"ID":"116b0564-3e29-4358-a713-663b8f4a156a","Type":"ContainerStarted","Data":"f374cbd3355cfabf471dd35ab1d0b9cad4e6e19bfb152eeb836776bd00a61fa6"} Nov 25 10:35:35 crc kubenswrapper[4769]: I1125 10:35:35.378367 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw" podStartSLOduration=1.865810106 podStartE2EDuration="2.378347611s" podCreationTimestamp="2025-11-25 10:35:33 +0000 UTC" firstStartedPulling="2025-11-25 10:35:34.384492684 +0000 UTC m=+3082.969464997" lastFinishedPulling="2025-11-25 10:35:34.897030189 +0000 UTC m=+3083.482002502" observedRunningTime="2025-11-25 10:35:35.376622285 +0000 UTC m=+3083.961594608" watchObservedRunningTime="2025-11-25 10:35:35.378347611 +0000 UTC m=+3083.963319924" Nov 25 10:35:41 crc kubenswrapper[4769]: I1125 10:35:41.237999 4769 scope.go:117] "RemoveContainer" containerID="ca58e4115a8ae8db9ff23948bf8aee779f500aa2408587bba8cdb927db89acf0" Nov 25 10:35:41 crc kubenswrapper[4769]: E1125 10:35:41.238936 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:35:55 crc kubenswrapper[4769]: I1125 10:35:55.238720 4769 scope.go:117] "RemoveContainer" containerID="ca58e4115a8ae8db9ff23948bf8aee779f500aa2408587bba8cdb927db89acf0" Nov 25 10:35:55 crc kubenswrapper[4769]: E1125 10:35:55.239597 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:36:10 crc kubenswrapper[4769]: I1125 10:36:10.237547 4769 scope.go:117] "RemoveContainer" containerID="ca58e4115a8ae8db9ff23948bf8aee779f500aa2408587bba8cdb927db89acf0" Nov 25 10:36:10 crc kubenswrapper[4769]: E1125 10:36:10.239328 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:36:23 crc kubenswrapper[4769]: I1125 10:36:23.237493 4769 scope.go:117] "RemoveContainer" containerID="ca58e4115a8ae8db9ff23948bf8aee779f500aa2408587bba8cdb927db89acf0" Nov 25 10:36:23 crc kubenswrapper[4769]: E1125 10:36:23.238626 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:36:37 crc kubenswrapper[4769]: I1125 10:36:37.238644 4769 scope.go:117] "RemoveContainer" containerID="ca58e4115a8ae8db9ff23948bf8aee779f500aa2408587bba8cdb927db89acf0" Nov 25 10:36:37 crc kubenswrapper[4769]: E1125 10:36:37.240181 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:36:52 crc kubenswrapper[4769]: I1125 10:36:52.249609 4769 scope.go:117] "RemoveContainer" containerID="ca58e4115a8ae8db9ff23948bf8aee779f500aa2408587bba8cdb927db89acf0" Nov 25 10:36:52 crc kubenswrapper[4769]: E1125 10:36:52.252304 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:37:03 crc kubenswrapper[4769]: I1125 10:37:03.237937 4769 scope.go:117] "RemoveContainer" containerID="ca58e4115a8ae8db9ff23948bf8aee779f500aa2408587bba8cdb927db89acf0" Nov 25 10:37:03 crc kubenswrapper[4769]: E1125 10:37:03.239150 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:37:18 crc kubenswrapper[4769]: I1125 10:37:18.237344 4769 scope.go:117] "RemoveContainer" containerID="ca58e4115a8ae8db9ff23948bf8aee779f500aa2408587bba8cdb927db89acf0" Nov 25 10:37:18 crc kubenswrapper[4769]: E1125 10:37:18.238791 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:37:31 crc kubenswrapper[4769]: I1125 10:37:31.237733 4769 scope.go:117] "RemoveContainer" containerID="ca58e4115a8ae8db9ff23948bf8aee779f500aa2408587bba8cdb927db89acf0" Nov 25 10:37:31 crc kubenswrapper[4769]: E1125 10:37:31.238804 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:37:36 crc kubenswrapper[4769]: I1125 10:37:36.918870 4769 generic.go:334] "Generic (PLEG): container finished" podID="116b0564-3e29-4358-a713-663b8f4a156a" containerID="77ccb08c5c6582fd9b998f84bcd90494e5ec619a3d387547c1b93960576b827f" exitCode=0 Nov 25 10:37:36 crc kubenswrapper[4769]: I1125 10:37:36.919025 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw" event={"ID":"116b0564-3e29-4358-a713-663b8f4a156a","Type":"ContainerDied","Data":"77ccb08c5c6582fd9b998f84bcd90494e5ec619a3d387547c1b93960576b827f"} Nov 25 10:37:38 crc kubenswrapper[4769]: I1125 10:37:38.490299 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw" Nov 25 10:37:38 crc kubenswrapper[4769]: I1125 10:37:38.597002 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/116b0564-3e29-4358-a713-663b8f4a156a-ceilometer-ipmi-config-data-1\") pod \"116b0564-3e29-4358-a713-663b8f4a156a\" (UID: \"116b0564-3e29-4358-a713-663b8f4a156a\") " Nov 25 10:37:38 crc kubenswrapper[4769]: I1125 10:37:38.597077 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/116b0564-3e29-4358-a713-663b8f4a156a-inventory\") pod \"116b0564-3e29-4358-a713-663b8f4a156a\" (UID: \"116b0564-3e29-4358-a713-663b8f4a156a\") " Nov 25 10:37:38 crc kubenswrapper[4769]: I1125 10:37:38.597118 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jnxbx\" (UniqueName: \"kubernetes.io/projected/116b0564-3e29-4358-a713-663b8f4a156a-kube-api-access-jnxbx\") pod \"116b0564-3e29-4358-a713-663b8f4a156a\" (UID: \"116b0564-3e29-4358-a713-663b8f4a156a\") " Nov 25 10:37:38 crc kubenswrapper[4769]: I1125 10:37:38.597268 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/116b0564-3e29-4358-a713-663b8f4a156a-ceilometer-ipmi-config-data-2\") pod \"116b0564-3e29-4358-a713-663b8f4a156a\" (UID: \"116b0564-3e29-4358-a713-663b8f4a156a\") " Nov 25 10:37:38 crc kubenswrapper[4769]: I1125 10:37:38.597365 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/116b0564-3e29-4358-a713-663b8f4a156a-ssh-key\") pod \"116b0564-3e29-4358-a713-663b8f4a156a\" (UID: \"116b0564-3e29-4358-a713-663b8f4a156a\") " Nov 25 10:37:38 crc kubenswrapper[4769]: I1125 10:37:38.597404 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/116b0564-3e29-4358-a713-663b8f4a156a-ceilometer-ipmi-config-data-0\") pod \"116b0564-3e29-4358-a713-663b8f4a156a\" (UID: \"116b0564-3e29-4358-a713-663b8f4a156a\") " Nov 25 10:37:38 crc kubenswrapper[4769]: I1125 10:37:38.598390 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/116b0564-3e29-4358-a713-663b8f4a156a-telemetry-power-monitoring-combined-ca-bundle\") pod \"116b0564-3e29-4358-a713-663b8f4a156a\" (UID: \"116b0564-3e29-4358-a713-663b8f4a156a\") " Nov 25 10:37:38 crc kubenswrapper[4769]: I1125 10:37:38.603048 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/116b0564-3e29-4358-a713-663b8f4a156a-kube-api-access-jnxbx" (OuterVolumeSpecName: "kube-api-access-jnxbx") pod "116b0564-3e29-4358-a713-663b8f4a156a" (UID: "116b0564-3e29-4358-a713-663b8f4a156a"). InnerVolumeSpecName "kube-api-access-jnxbx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:37:38 crc kubenswrapper[4769]: I1125 10:37:38.617838 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/116b0564-3e29-4358-a713-663b8f4a156a-telemetry-power-monitoring-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-power-monitoring-combined-ca-bundle") pod "116b0564-3e29-4358-a713-663b8f4a156a" (UID: "116b0564-3e29-4358-a713-663b8f4a156a"). InnerVolumeSpecName "telemetry-power-monitoring-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:37:38 crc kubenswrapper[4769]: I1125 10:37:38.636727 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/116b0564-3e29-4358-a713-663b8f4a156a-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "116b0564-3e29-4358-a713-663b8f4a156a" (UID: "116b0564-3e29-4358-a713-663b8f4a156a"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:37:38 crc kubenswrapper[4769]: I1125 10:37:38.637236 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/116b0564-3e29-4358-a713-663b8f4a156a-ceilometer-ipmi-config-data-2" (OuterVolumeSpecName: "ceilometer-ipmi-config-data-2") pod "116b0564-3e29-4358-a713-663b8f4a156a" (UID: "116b0564-3e29-4358-a713-663b8f4a156a"). InnerVolumeSpecName "ceilometer-ipmi-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:37:38 crc kubenswrapper[4769]: I1125 10:37:38.645201 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/116b0564-3e29-4358-a713-663b8f4a156a-inventory" (OuterVolumeSpecName: "inventory") pod "116b0564-3e29-4358-a713-663b8f4a156a" (UID: "116b0564-3e29-4358-a713-663b8f4a156a"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:37:38 crc kubenswrapper[4769]: I1125 10:37:38.645735 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/116b0564-3e29-4358-a713-663b8f4a156a-ceilometer-ipmi-config-data-0" (OuterVolumeSpecName: "ceilometer-ipmi-config-data-0") pod "116b0564-3e29-4358-a713-663b8f4a156a" (UID: "116b0564-3e29-4358-a713-663b8f4a156a"). InnerVolumeSpecName "ceilometer-ipmi-config-data-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:37:38 crc kubenswrapper[4769]: I1125 10:37:38.648062 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/116b0564-3e29-4358-a713-663b8f4a156a-ceilometer-ipmi-config-data-1" (OuterVolumeSpecName: "ceilometer-ipmi-config-data-1") pod "116b0564-3e29-4358-a713-663b8f4a156a" (UID: "116b0564-3e29-4358-a713-663b8f4a156a"). InnerVolumeSpecName "ceilometer-ipmi-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:37:38 crc kubenswrapper[4769]: I1125 10:37:38.702014 4769 reconciler_common.go:293] "Volume detached for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/116b0564-3e29-4358-a713-663b8f4a156a-telemetry-power-monitoring-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:37:38 crc kubenswrapper[4769]: I1125 10:37:38.702058 4769 reconciler_common.go:293] "Volume detached for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/116b0564-3e29-4358-a713-663b8f4a156a-ceilometer-ipmi-config-data-1\") on node \"crc\" DevicePath \"\"" Nov 25 10:37:38 crc kubenswrapper[4769]: I1125 10:37:38.702074 4769 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/116b0564-3e29-4358-a713-663b8f4a156a-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 10:37:38 crc kubenswrapper[4769]: I1125 10:37:38.702088 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jnxbx\" (UniqueName: \"kubernetes.io/projected/116b0564-3e29-4358-a713-663b8f4a156a-kube-api-access-jnxbx\") on node \"crc\" DevicePath \"\"" Nov 25 10:37:38 crc kubenswrapper[4769]: I1125 10:37:38.702103 4769 reconciler_common.go:293] "Volume detached for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/116b0564-3e29-4358-a713-663b8f4a156a-ceilometer-ipmi-config-data-2\") on node \"crc\" DevicePath \"\"" Nov 25 10:37:38 crc kubenswrapper[4769]: I1125 10:37:38.702115 4769 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/116b0564-3e29-4358-a713-663b8f4a156a-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:37:38 crc kubenswrapper[4769]: I1125 10:37:38.702128 4769 reconciler_common.go:293] "Volume detached for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/116b0564-3e29-4358-a713-663b8f4a156a-ceilometer-ipmi-config-data-0\") on node \"crc\" DevicePath \"\"" Nov 25 10:37:38 crc kubenswrapper[4769]: I1125 10:37:38.958902 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw" event={"ID":"116b0564-3e29-4358-a713-663b8f4a156a","Type":"ContainerDied","Data":"f374cbd3355cfabf471dd35ab1d0b9cad4e6e19bfb152eeb836776bd00a61fa6"} Nov 25 10:37:38 crc kubenswrapper[4769]: I1125 10:37:38.958993 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f374cbd3355cfabf471dd35ab1d0b9cad4e6e19bfb152eeb836776bd00a61fa6" Nov 25 10:37:38 crc kubenswrapper[4769]: I1125 10:37:38.959014 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw" Nov 25 10:37:39 crc kubenswrapper[4769]: I1125 10:37:39.061610 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/logging-edpm-deployment-openstack-edpm-ipam-mrb5n"] Nov 25 10:37:39 crc kubenswrapper[4769]: E1125 10:37:39.062184 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="116b0564-3e29-4358-a713-663b8f4a156a" containerName="telemetry-power-monitoring-edpm-deployment-openstack-edpm-ipam" Nov 25 10:37:39 crc kubenswrapper[4769]: I1125 10:37:39.062209 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="116b0564-3e29-4358-a713-663b8f4a156a" containerName="telemetry-power-monitoring-edpm-deployment-openstack-edpm-ipam" Nov 25 10:37:39 crc kubenswrapper[4769]: I1125 10:37:39.062506 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="116b0564-3e29-4358-a713-663b8f4a156a" containerName="telemetry-power-monitoring-edpm-deployment-openstack-edpm-ipam" Nov 25 10:37:39 crc kubenswrapper[4769]: I1125 10:37:39.063692 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-mrb5n" Nov 25 10:37:39 crc kubenswrapper[4769]: I1125 10:37:39.066692 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-skqtv" Nov 25 10:37:39 crc kubenswrapper[4769]: I1125 10:37:39.066933 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"logging-compute-config-data" Nov 25 10:37:39 crc kubenswrapper[4769]: I1125 10:37:39.067777 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:37:39 crc kubenswrapper[4769]: I1125 10:37:39.068864 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 10:37:39 crc kubenswrapper[4769]: I1125 10:37:39.068929 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 10:37:39 crc kubenswrapper[4769]: I1125 10:37:39.079377 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/logging-edpm-deployment-openstack-edpm-ipam-mrb5n"] Nov 25 10:37:39 crc kubenswrapper[4769]: I1125 10:37:39.214258 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-68sqc\" (UniqueName: \"kubernetes.io/projected/1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb-kube-api-access-68sqc\") pod \"logging-edpm-deployment-openstack-edpm-ipam-mrb5n\" (UID: \"1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-mrb5n" Nov 25 10:37:39 crc kubenswrapper[4769]: I1125 10:37:39.214310 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb-logging-compute-config-data-0\") pod \"logging-edpm-deployment-openstack-edpm-ipam-mrb5n\" (UID: \"1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-mrb5n" Nov 25 10:37:39 crc kubenswrapper[4769]: I1125 10:37:39.214345 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb-ssh-key\") pod 
\"logging-edpm-deployment-openstack-edpm-ipam-mrb5n\" (UID: \"1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-mrb5n" Nov 25 10:37:39 crc kubenswrapper[4769]: I1125 10:37:39.214452 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb-logging-compute-config-data-1\") pod \"logging-edpm-deployment-openstack-edpm-ipam-mrb5n\" (UID: \"1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-mrb5n" Nov 25 10:37:39 crc kubenswrapper[4769]: I1125 10:37:39.214677 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb-inventory\") pod \"logging-edpm-deployment-openstack-edpm-ipam-mrb5n\" (UID: \"1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-mrb5n" Nov 25 10:37:39 crc kubenswrapper[4769]: I1125 10:37:39.317682 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-68sqc\" (UniqueName: \"kubernetes.io/projected/1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb-kube-api-access-68sqc\") pod \"logging-edpm-deployment-openstack-edpm-ipam-mrb5n\" (UID: \"1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-mrb5n" Nov 25 10:37:39 crc kubenswrapper[4769]: I1125 10:37:39.318120 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb-logging-compute-config-data-0\") pod \"logging-edpm-deployment-openstack-edpm-ipam-mrb5n\" (UID: \"1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-mrb5n" Nov 25 10:37:39 crc kubenswrapper[4769]: I1125 10:37:39.318151 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb-ssh-key\") pod \"logging-edpm-deployment-openstack-edpm-ipam-mrb5n\" (UID: \"1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-mrb5n" Nov 25 10:37:39 crc kubenswrapper[4769]: I1125 10:37:39.318286 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb-logging-compute-config-data-1\") pod \"logging-edpm-deployment-openstack-edpm-ipam-mrb5n\" (UID: \"1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-mrb5n" Nov 25 10:37:39 crc kubenswrapper[4769]: I1125 10:37:39.319176 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb-inventory\") pod \"logging-edpm-deployment-openstack-edpm-ipam-mrb5n\" (UID: \"1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-mrb5n" Nov 25 10:37:39 crc kubenswrapper[4769]: I1125 10:37:39.324776 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb-inventory\") pod 
\"logging-edpm-deployment-openstack-edpm-ipam-mrb5n\" (UID: \"1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-mrb5n" Nov 25 10:37:39 crc kubenswrapper[4769]: I1125 10:37:39.327428 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb-logging-compute-config-data-0\") pod \"logging-edpm-deployment-openstack-edpm-ipam-mrb5n\" (UID: \"1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-mrb5n" Nov 25 10:37:39 crc kubenswrapper[4769]: I1125 10:37:39.327771 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb-ssh-key\") pod \"logging-edpm-deployment-openstack-edpm-ipam-mrb5n\" (UID: \"1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-mrb5n" Nov 25 10:37:39 crc kubenswrapper[4769]: I1125 10:37:39.331372 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb-logging-compute-config-data-1\") pod \"logging-edpm-deployment-openstack-edpm-ipam-mrb5n\" (UID: \"1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-mrb5n" Nov 25 10:37:39 crc kubenswrapper[4769]: I1125 10:37:39.342288 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-68sqc\" (UniqueName: \"kubernetes.io/projected/1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb-kube-api-access-68sqc\") pod \"logging-edpm-deployment-openstack-edpm-ipam-mrb5n\" (UID: \"1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-mrb5n" Nov 25 10:37:39 crc kubenswrapper[4769]: I1125 10:37:39.396780 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-mrb5n" Nov 25 10:37:40 crc kubenswrapper[4769]: I1125 10:37:40.016353 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/logging-edpm-deployment-openstack-edpm-ipam-mrb5n"] Nov 25 10:37:40 crc kubenswrapper[4769]: I1125 10:37:40.993815 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-mrb5n" event={"ID":"1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb","Type":"ContainerStarted","Data":"68d6ffc05b64722441cb018776b777ddcfba05709c1c448b1b9bb98c7e77d808"} Nov 25 10:37:40 crc kubenswrapper[4769]: I1125 10:37:40.994109 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-mrb5n" event={"ID":"1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb","Type":"ContainerStarted","Data":"15a41dee8986c2e3c34a26642572d79441cd026c60a1e05b7001220e134451b5"} Nov 25 10:37:41 crc kubenswrapper[4769]: I1125 10:37:41.029875 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-mrb5n" podStartSLOduration=1.5069944720000001 podStartE2EDuration="2.029846833s" podCreationTimestamp="2025-11-25 10:37:39 +0000 UTC" firstStartedPulling="2025-11-25 10:37:40.022980208 +0000 UTC m=+3208.607952521" lastFinishedPulling="2025-11-25 10:37:40.545832569 +0000 UTC m=+3209.130804882" observedRunningTime="2025-11-25 10:37:41.012757746 +0000 UTC m=+3209.597730089" watchObservedRunningTime="2025-11-25 10:37:41.029846833 +0000 UTC m=+3209.614819186" Nov 25 10:37:44 crc kubenswrapper[4769]: I1125 10:37:44.237636 4769 scope.go:117] "RemoveContainer" containerID="ca58e4115a8ae8db9ff23948bf8aee779f500aa2408587bba8cdb927db89acf0" Nov 25 10:37:44 crc kubenswrapper[4769]: E1125 10:37:44.238671 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:37:55 crc kubenswrapper[4769]: E1125 10:37:55.947248 4769 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1a1e04d8_9fba_4f14_a9ac_845e6bd82fcb.slice/crio-68d6ffc05b64722441cb018776b777ddcfba05709c1c448b1b9bb98c7e77d808.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1a1e04d8_9fba_4f14_a9ac_845e6bd82fcb.slice/crio-conmon-68d6ffc05b64722441cb018776b777ddcfba05709c1c448b1b9bb98c7e77d808.scope\": RecentStats: unable to find data in memory cache]" Nov 25 10:37:56 crc kubenswrapper[4769]: I1125 10:37:56.321647 4769 generic.go:334] "Generic (PLEG): container finished" podID="1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb" containerID="68d6ffc05b64722441cb018776b777ddcfba05709c1c448b1b9bb98c7e77d808" exitCode=0 Nov 25 10:37:56 crc kubenswrapper[4769]: I1125 10:37:56.321729 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-mrb5n" event={"ID":"1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb","Type":"ContainerDied","Data":"68d6ffc05b64722441cb018776b777ddcfba05709c1c448b1b9bb98c7e77d808"} Nov 25 10:37:57 crc 
kubenswrapper[4769]: I1125 10:37:57.886664 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-mrb5n" Nov 25 10:37:58 crc kubenswrapper[4769]: I1125 10:37:58.048900 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb-ssh-key\") pod \"1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb\" (UID: \"1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb\") " Nov 25 10:37:58 crc kubenswrapper[4769]: I1125 10:37:58.049318 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb-logging-compute-config-data-1\") pod \"1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb\" (UID: \"1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb\") " Nov 25 10:37:58 crc kubenswrapper[4769]: I1125 10:37:58.049357 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb-logging-compute-config-data-0\") pod \"1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb\" (UID: \"1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb\") " Nov 25 10:37:58 crc kubenswrapper[4769]: I1125 10:37:58.050063 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb-inventory\") pod \"1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb\" (UID: \"1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb\") " Nov 25 10:37:58 crc kubenswrapper[4769]: I1125 10:37:58.050153 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-68sqc\" (UniqueName: \"kubernetes.io/projected/1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb-kube-api-access-68sqc\") pod \"1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb\" (UID: \"1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb\") " Nov 25 10:37:58 crc kubenswrapper[4769]: I1125 10:37:58.055241 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb-kube-api-access-68sqc" (OuterVolumeSpecName: "kube-api-access-68sqc") pod "1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb" (UID: "1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb"). InnerVolumeSpecName "kube-api-access-68sqc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:37:58 crc kubenswrapper[4769]: I1125 10:37:58.087900 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb-logging-compute-config-data-1" (OuterVolumeSpecName: "logging-compute-config-data-1") pod "1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb" (UID: "1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb"). InnerVolumeSpecName "logging-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:37:58 crc kubenswrapper[4769]: I1125 10:37:58.088601 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb-inventory" (OuterVolumeSpecName: "inventory") pod "1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb" (UID: "1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:37:58 crc kubenswrapper[4769]: I1125 10:37:58.088941 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb-logging-compute-config-data-0" (OuterVolumeSpecName: "logging-compute-config-data-0") pod "1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb" (UID: "1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb"). InnerVolumeSpecName "logging-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:37:58 crc kubenswrapper[4769]: I1125 10:37:58.090571 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb" (UID: "1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:37:58 crc kubenswrapper[4769]: I1125 10:37:58.154061 4769 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:37:58 crc kubenswrapper[4769]: I1125 10:37:58.154090 4769 reconciler_common.go:293] "Volume detached for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb-logging-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Nov 25 10:37:58 crc kubenswrapper[4769]: I1125 10:37:58.154120 4769 reconciler_common.go:293] "Volume detached for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb-logging-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Nov 25 10:37:58 crc kubenswrapper[4769]: I1125 10:37:58.154132 4769 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 10:37:58 crc kubenswrapper[4769]: I1125 10:37:58.154152 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-68sqc\" (UniqueName: \"kubernetes.io/projected/1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb-kube-api-access-68sqc\") on node \"crc\" DevicePath \"\"" Nov 25 10:37:58 crc kubenswrapper[4769]: I1125 10:37:58.238632 4769 scope.go:117] "RemoveContainer" containerID="ca58e4115a8ae8db9ff23948bf8aee779f500aa2408587bba8cdb927db89acf0" Nov 25 10:37:58 crc kubenswrapper[4769]: E1125 10:37:58.239512 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:37:58 crc kubenswrapper[4769]: I1125 10:37:58.347724 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-mrb5n" event={"ID":"1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb","Type":"ContainerDied","Data":"15a41dee8986c2e3c34a26642572d79441cd026c60a1e05b7001220e134451b5"} Nov 25 10:37:58 crc kubenswrapper[4769]: I1125 10:37:58.347766 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="15a41dee8986c2e3c34a26642572d79441cd026c60a1e05b7001220e134451b5" Nov 
25 10:37:58 crc kubenswrapper[4769]: I1125 10:37:58.347798 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-mrb5n" Nov 25 10:38:09 crc kubenswrapper[4769]: I1125 10:38:09.238414 4769 scope.go:117] "RemoveContainer" containerID="ca58e4115a8ae8db9ff23948bf8aee779f500aa2408587bba8cdb927db89acf0" Nov 25 10:38:09 crc kubenswrapper[4769]: E1125 10:38:09.239411 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:38:24 crc kubenswrapper[4769]: I1125 10:38:24.244113 4769 scope.go:117] "RemoveContainer" containerID="ca58e4115a8ae8db9ff23948bf8aee779f500aa2408587bba8cdb927db89acf0" Nov 25 10:38:24 crc kubenswrapper[4769]: E1125 10:38:24.244900 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:38:36 crc kubenswrapper[4769]: I1125 10:38:36.237588 4769 scope.go:117] "RemoveContainer" containerID="ca58e4115a8ae8db9ff23948bf8aee779f500aa2408587bba8cdb927db89acf0" Nov 25 10:38:36 crc kubenswrapper[4769]: E1125 10:38:36.238354 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:38:49 crc kubenswrapper[4769]: I1125 10:38:49.239097 4769 scope.go:117] "RemoveContainer" containerID="ca58e4115a8ae8db9ff23948bf8aee779f500aa2408587bba8cdb927db89acf0" Nov 25 10:38:49 crc kubenswrapper[4769]: E1125 10:38:49.239965 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:39:04 crc kubenswrapper[4769]: I1125 10:39:04.243509 4769 scope.go:117] "RemoveContainer" containerID="ca58e4115a8ae8db9ff23948bf8aee779f500aa2408587bba8cdb927db89acf0" Nov 25 10:39:05 crc kubenswrapper[4769]: I1125 10:39:05.233293 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" event={"ID":"d58c71b5-5dc4-45c1-9b58-9740a35d2256","Type":"ContainerStarted","Data":"2dda2b74a193e53366972e7a46dc5b774e7871f50dd86ba2a2329a05e9765b6f"} Nov 25 10:39:38 crc kubenswrapper[4769]: I1125 10:39:38.836468 4769 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/certified-operators-zkkr6"] Nov 25 10:39:38 crc kubenswrapper[4769]: E1125 10:39:38.838395 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb" containerName="logging-edpm-deployment-openstack-edpm-ipam" Nov 25 10:39:38 crc kubenswrapper[4769]: I1125 10:39:38.838431 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb" containerName="logging-edpm-deployment-openstack-edpm-ipam" Nov 25 10:39:38 crc kubenswrapper[4769]: I1125 10:39:38.838990 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb" containerName="logging-edpm-deployment-openstack-edpm-ipam" Nov 25 10:39:38 crc kubenswrapper[4769]: I1125 10:39:38.843148 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zkkr6" Nov 25 10:39:38 crc kubenswrapper[4769]: I1125 10:39:38.858495 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zkkr6"] Nov 25 10:39:38 crc kubenswrapper[4769]: I1125 10:39:38.927753 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37a68b26-59dc-4bde-b3a6-37a68f0d1ae5-catalog-content\") pod \"certified-operators-zkkr6\" (UID: \"37a68b26-59dc-4bde-b3a6-37a68f0d1ae5\") " pod="openshift-marketplace/certified-operators-zkkr6" Nov 25 10:39:38 crc kubenswrapper[4769]: I1125 10:39:38.928402 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37a68b26-59dc-4bde-b3a6-37a68f0d1ae5-utilities\") pod \"certified-operators-zkkr6\" (UID: \"37a68b26-59dc-4bde-b3a6-37a68f0d1ae5\") " pod="openshift-marketplace/certified-operators-zkkr6" Nov 25 10:39:38 crc kubenswrapper[4769]: I1125 10:39:38.929070 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2j5wc\" (UniqueName: \"kubernetes.io/projected/37a68b26-59dc-4bde-b3a6-37a68f0d1ae5-kube-api-access-2j5wc\") pod \"certified-operators-zkkr6\" (UID: \"37a68b26-59dc-4bde-b3a6-37a68f0d1ae5\") " pod="openshift-marketplace/certified-operators-zkkr6" Nov 25 10:39:39 crc kubenswrapper[4769]: I1125 10:39:39.032154 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2j5wc\" (UniqueName: \"kubernetes.io/projected/37a68b26-59dc-4bde-b3a6-37a68f0d1ae5-kube-api-access-2j5wc\") pod \"certified-operators-zkkr6\" (UID: \"37a68b26-59dc-4bde-b3a6-37a68f0d1ae5\") " pod="openshift-marketplace/certified-operators-zkkr6" Nov 25 10:39:39 crc kubenswrapper[4769]: I1125 10:39:39.032484 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37a68b26-59dc-4bde-b3a6-37a68f0d1ae5-catalog-content\") pod \"certified-operators-zkkr6\" (UID: \"37a68b26-59dc-4bde-b3a6-37a68f0d1ae5\") " pod="openshift-marketplace/certified-operators-zkkr6" Nov 25 10:39:39 crc kubenswrapper[4769]: I1125 10:39:39.032848 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37a68b26-59dc-4bde-b3a6-37a68f0d1ae5-utilities\") pod \"certified-operators-zkkr6\" (UID: \"37a68b26-59dc-4bde-b3a6-37a68f0d1ae5\") " pod="openshift-marketplace/certified-operators-zkkr6" Nov 25 
10:39:39 crc kubenswrapper[4769]: I1125 10:39:39.033180 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37a68b26-59dc-4bde-b3a6-37a68f0d1ae5-catalog-content\") pod \"certified-operators-zkkr6\" (UID: \"37a68b26-59dc-4bde-b3a6-37a68f0d1ae5\") " pod="openshift-marketplace/certified-operators-zkkr6" Nov 25 10:39:39 crc kubenswrapper[4769]: I1125 10:39:39.033350 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37a68b26-59dc-4bde-b3a6-37a68f0d1ae5-utilities\") pod \"certified-operators-zkkr6\" (UID: \"37a68b26-59dc-4bde-b3a6-37a68f0d1ae5\") " pod="openshift-marketplace/certified-operators-zkkr6" Nov 25 10:39:39 crc kubenswrapper[4769]: I1125 10:39:39.058159 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2j5wc\" (UniqueName: \"kubernetes.io/projected/37a68b26-59dc-4bde-b3a6-37a68f0d1ae5-kube-api-access-2j5wc\") pod \"certified-operators-zkkr6\" (UID: \"37a68b26-59dc-4bde-b3a6-37a68f0d1ae5\") " pod="openshift-marketplace/certified-operators-zkkr6" Nov 25 10:39:39 crc kubenswrapper[4769]: I1125 10:39:39.178626 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zkkr6" Nov 25 10:39:39 crc kubenswrapper[4769]: I1125 10:39:39.863564 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zkkr6"] Nov 25 10:39:39 crc kubenswrapper[4769]: W1125 10:39:39.876269 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a68b26_59dc_4bde_b3a6_37a68f0d1ae5.slice/crio-4501dff8130b7cdbd120bea19b66bca21b918cc06b51ec71c5a62425ebae9abb WatchSource:0}: Error finding container 4501dff8130b7cdbd120bea19b66bca21b918cc06b51ec71c5a62425ebae9abb: Status 404 returned error can't find the container with id 4501dff8130b7cdbd120bea19b66bca21b918cc06b51ec71c5a62425ebae9abb Nov 25 10:39:40 crc kubenswrapper[4769]: I1125 10:39:40.733522 4769 generic.go:334] "Generic (PLEG): container finished" podID="37a68b26-59dc-4bde-b3a6-37a68f0d1ae5" containerID="d5828d4a8c0f4ee7d022fd76c57afb2ed79fc222374e799b9c4bcb99eafb1c48" exitCode=0 Nov 25 10:39:40 crc kubenswrapper[4769]: I1125 10:39:40.733597 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zkkr6" event={"ID":"37a68b26-59dc-4bde-b3a6-37a68f0d1ae5","Type":"ContainerDied","Data":"d5828d4a8c0f4ee7d022fd76c57afb2ed79fc222374e799b9c4bcb99eafb1c48"} Nov 25 10:39:40 crc kubenswrapper[4769]: I1125 10:39:40.734266 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zkkr6" event={"ID":"37a68b26-59dc-4bde-b3a6-37a68f0d1ae5","Type":"ContainerStarted","Data":"4501dff8130b7cdbd120bea19b66bca21b918cc06b51ec71c5a62425ebae9abb"} Nov 25 10:39:42 crc kubenswrapper[4769]: I1125 10:39:42.772132 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zkkr6" event={"ID":"37a68b26-59dc-4bde-b3a6-37a68f0d1ae5","Type":"ContainerStarted","Data":"5f292a0abd572b0424300dad0c08f2ae204dce6756272baa8411b55e3dd756ea"} Nov 25 10:39:43 crc kubenswrapper[4769]: I1125 10:39:43.785618 4769 generic.go:334] "Generic (PLEG): container finished" podID="37a68b26-59dc-4bde-b3a6-37a68f0d1ae5" containerID="5f292a0abd572b0424300dad0c08f2ae204dce6756272baa8411b55e3dd756ea" exitCode=0 Nov 25 
10:39:43 crc kubenswrapper[4769]: I1125 10:39:43.785694 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zkkr6" event={"ID":"37a68b26-59dc-4bde-b3a6-37a68f0d1ae5","Type":"ContainerDied","Data":"5f292a0abd572b0424300dad0c08f2ae204dce6756272baa8411b55e3dd756ea"} Nov 25 10:39:44 crc kubenswrapper[4769]: I1125 10:39:44.796488 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zkkr6" event={"ID":"37a68b26-59dc-4bde-b3a6-37a68f0d1ae5","Type":"ContainerStarted","Data":"8493ef81011e012c9ba6a4b7bb0eb264f475696b738ce2e69ce9846d4416f505"} Nov 25 10:39:44 crc kubenswrapper[4769]: I1125 10:39:44.828995 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-zkkr6" podStartSLOduration=3.251154493 podStartE2EDuration="6.828953753s" podCreationTimestamp="2025-11-25 10:39:38 +0000 UTC" firstStartedPulling="2025-11-25 10:39:40.737009285 +0000 UTC m=+3329.321981608" lastFinishedPulling="2025-11-25 10:39:44.314808515 +0000 UTC m=+3332.899780868" observedRunningTime="2025-11-25 10:39:44.81423469 +0000 UTC m=+3333.399207023" watchObservedRunningTime="2025-11-25 10:39:44.828953753 +0000 UTC m=+3333.413926086" Nov 25 10:39:49 crc kubenswrapper[4769]: I1125 10:39:49.179610 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-zkkr6" Nov 25 10:39:49 crc kubenswrapper[4769]: I1125 10:39:49.180301 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-zkkr6" Nov 25 10:39:49 crc kubenswrapper[4769]: I1125 10:39:49.277517 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-zkkr6" Nov 25 10:39:49 crc kubenswrapper[4769]: I1125 10:39:49.930951 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-zkkr6" Nov 25 10:39:50 crc kubenswrapper[4769]: I1125 10:39:50.006965 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zkkr6"] Nov 25 10:39:51 crc kubenswrapper[4769]: I1125 10:39:51.884992 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-zkkr6" podUID="37a68b26-59dc-4bde-b3a6-37a68f0d1ae5" containerName="registry-server" containerID="cri-o://8493ef81011e012c9ba6a4b7bb0eb264f475696b738ce2e69ce9846d4416f505" gracePeriod=2 Nov 25 10:39:52 crc kubenswrapper[4769]: I1125 10:39:52.674798 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-zkkr6" Nov 25 10:39:52 crc kubenswrapper[4769]: I1125 10:39:52.743584 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37a68b26-59dc-4bde-b3a6-37a68f0d1ae5-catalog-content\") pod \"37a68b26-59dc-4bde-b3a6-37a68f0d1ae5\" (UID: \"37a68b26-59dc-4bde-b3a6-37a68f0d1ae5\") " Nov 25 10:39:52 crc kubenswrapper[4769]: I1125 10:39:52.743739 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2j5wc\" (UniqueName: \"kubernetes.io/projected/37a68b26-59dc-4bde-b3a6-37a68f0d1ae5-kube-api-access-2j5wc\") pod \"37a68b26-59dc-4bde-b3a6-37a68f0d1ae5\" (UID: \"37a68b26-59dc-4bde-b3a6-37a68f0d1ae5\") " Nov 25 10:39:52 crc kubenswrapper[4769]: I1125 10:39:52.743844 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37a68b26-59dc-4bde-b3a6-37a68f0d1ae5-utilities\") pod \"37a68b26-59dc-4bde-b3a6-37a68f0d1ae5\" (UID: \"37a68b26-59dc-4bde-b3a6-37a68f0d1ae5\") " Nov 25 10:39:52 crc kubenswrapper[4769]: I1125 10:39:52.745836 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/37a68b26-59dc-4bde-b3a6-37a68f0d1ae5-utilities" (OuterVolumeSpecName: "utilities") pod "37a68b26-59dc-4bde-b3a6-37a68f0d1ae5" (UID: "37a68b26-59dc-4bde-b3a6-37a68f0d1ae5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:39:52 crc kubenswrapper[4769]: I1125 10:39:52.766034 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/37a68b26-59dc-4bde-b3a6-37a68f0d1ae5-kube-api-access-2j5wc" (OuterVolumeSpecName: "kube-api-access-2j5wc") pod "37a68b26-59dc-4bde-b3a6-37a68f0d1ae5" (UID: "37a68b26-59dc-4bde-b3a6-37a68f0d1ae5"). InnerVolumeSpecName "kube-api-access-2j5wc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:39:52 crc kubenswrapper[4769]: I1125 10:39:52.801648 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/37a68b26-59dc-4bde-b3a6-37a68f0d1ae5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "37a68b26-59dc-4bde-b3a6-37a68f0d1ae5" (UID: "37a68b26-59dc-4bde-b3a6-37a68f0d1ae5"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:39:52 crc kubenswrapper[4769]: I1125 10:39:52.846661 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37a68b26-59dc-4bde-b3a6-37a68f0d1ae5-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:39:52 crc kubenswrapper[4769]: I1125 10:39:52.846702 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2j5wc\" (UniqueName: \"kubernetes.io/projected/37a68b26-59dc-4bde-b3a6-37a68f0d1ae5-kube-api-access-2j5wc\") on node \"crc\" DevicePath \"\"" Nov 25 10:39:52 crc kubenswrapper[4769]: I1125 10:39:52.846720 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37a68b26-59dc-4bde-b3a6-37a68f0d1ae5-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:39:52 crc kubenswrapper[4769]: I1125 10:39:52.903628 4769 generic.go:334] "Generic (PLEG): container finished" podID="37a68b26-59dc-4bde-b3a6-37a68f0d1ae5" containerID="8493ef81011e012c9ba6a4b7bb0eb264f475696b738ce2e69ce9846d4416f505" exitCode=0 Nov 25 10:39:52 crc kubenswrapper[4769]: I1125 10:39:52.903700 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zkkr6" event={"ID":"37a68b26-59dc-4bde-b3a6-37a68f0d1ae5","Type":"ContainerDied","Data":"8493ef81011e012c9ba6a4b7bb0eb264f475696b738ce2e69ce9846d4416f505"} Nov 25 10:39:52 crc kubenswrapper[4769]: I1125 10:39:52.903742 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zkkr6" event={"ID":"37a68b26-59dc-4bde-b3a6-37a68f0d1ae5","Type":"ContainerDied","Data":"4501dff8130b7cdbd120bea19b66bca21b918cc06b51ec71c5a62425ebae9abb"} Nov 25 10:39:52 crc kubenswrapper[4769]: I1125 10:39:52.903772 4769 scope.go:117] "RemoveContainer" containerID="8493ef81011e012c9ba6a4b7bb0eb264f475696b738ce2e69ce9846d4416f505" Nov 25 10:39:52 crc kubenswrapper[4769]: I1125 10:39:52.904030 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-zkkr6" Nov 25 10:39:52 crc kubenswrapper[4769]: I1125 10:39:52.928381 4769 scope.go:117] "RemoveContainer" containerID="5f292a0abd572b0424300dad0c08f2ae204dce6756272baa8411b55e3dd756ea" Nov 25 10:39:52 crc kubenswrapper[4769]: I1125 10:39:52.951992 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zkkr6"] Nov 25 10:39:52 crc kubenswrapper[4769]: I1125 10:39:52.966799 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-zkkr6"] Nov 25 10:39:52 crc kubenswrapper[4769]: I1125 10:39:52.967604 4769 scope.go:117] "RemoveContainer" containerID="d5828d4a8c0f4ee7d022fd76c57afb2ed79fc222374e799b9c4bcb99eafb1c48" Nov 25 10:39:53 crc kubenswrapper[4769]: I1125 10:39:53.027284 4769 scope.go:117] "RemoveContainer" containerID="8493ef81011e012c9ba6a4b7bb0eb264f475696b738ce2e69ce9846d4416f505" Nov 25 10:39:53 crc kubenswrapper[4769]: E1125 10:39:53.028358 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8493ef81011e012c9ba6a4b7bb0eb264f475696b738ce2e69ce9846d4416f505\": container with ID starting with 8493ef81011e012c9ba6a4b7bb0eb264f475696b738ce2e69ce9846d4416f505 not found: ID does not exist" containerID="8493ef81011e012c9ba6a4b7bb0eb264f475696b738ce2e69ce9846d4416f505" Nov 25 10:39:53 crc kubenswrapper[4769]: I1125 10:39:53.028410 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8493ef81011e012c9ba6a4b7bb0eb264f475696b738ce2e69ce9846d4416f505"} err="failed to get container status \"8493ef81011e012c9ba6a4b7bb0eb264f475696b738ce2e69ce9846d4416f505\": rpc error: code = NotFound desc = could not find container \"8493ef81011e012c9ba6a4b7bb0eb264f475696b738ce2e69ce9846d4416f505\": container with ID starting with 8493ef81011e012c9ba6a4b7bb0eb264f475696b738ce2e69ce9846d4416f505 not found: ID does not exist" Nov 25 10:39:53 crc kubenswrapper[4769]: I1125 10:39:53.028441 4769 scope.go:117] "RemoveContainer" containerID="5f292a0abd572b0424300dad0c08f2ae204dce6756272baa8411b55e3dd756ea" Nov 25 10:39:53 crc kubenswrapper[4769]: E1125 10:39:53.028900 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5f292a0abd572b0424300dad0c08f2ae204dce6756272baa8411b55e3dd756ea\": container with ID starting with 5f292a0abd572b0424300dad0c08f2ae204dce6756272baa8411b55e3dd756ea not found: ID does not exist" containerID="5f292a0abd572b0424300dad0c08f2ae204dce6756272baa8411b55e3dd756ea" Nov 25 10:39:53 crc kubenswrapper[4769]: I1125 10:39:53.028953 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5f292a0abd572b0424300dad0c08f2ae204dce6756272baa8411b55e3dd756ea"} err="failed to get container status \"5f292a0abd572b0424300dad0c08f2ae204dce6756272baa8411b55e3dd756ea\": rpc error: code = NotFound desc = could not find container \"5f292a0abd572b0424300dad0c08f2ae204dce6756272baa8411b55e3dd756ea\": container with ID starting with 5f292a0abd572b0424300dad0c08f2ae204dce6756272baa8411b55e3dd756ea not found: ID does not exist" Nov 25 10:39:53 crc kubenswrapper[4769]: I1125 10:39:53.029013 4769 scope.go:117] "RemoveContainer" containerID="d5828d4a8c0f4ee7d022fd76c57afb2ed79fc222374e799b9c4bcb99eafb1c48" Nov 25 10:39:53 crc kubenswrapper[4769]: E1125 10:39:53.029379 4769 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"d5828d4a8c0f4ee7d022fd76c57afb2ed79fc222374e799b9c4bcb99eafb1c48\": container with ID starting with d5828d4a8c0f4ee7d022fd76c57afb2ed79fc222374e799b9c4bcb99eafb1c48 not found: ID does not exist" containerID="d5828d4a8c0f4ee7d022fd76c57afb2ed79fc222374e799b9c4bcb99eafb1c48" Nov 25 10:39:53 crc kubenswrapper[4769]: I1125 10:39:53.029421 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d5828d4a8c0f4ee7d022fd76c57afb2ed79fc222374e799b9c4bcb99eafb1c48"} err="failed to get container status \"d5828d4a8c0f4ee7d022fd76c57afb2ed79fc222374e799b9c4bcb99eafb1c48\": rpc error: code = NotFound desc = could not find container \"d5828d4a8c0f4ee7d022fd76c57afb2ed79fc222374e799b9c4bcb99eafb1c48\": container with ID starting with d5828d4a8c0f4ee7d022fd76c57afb2ed79fc222374e799b9c4bcb99eafb1c48 not found: ID does not exist" Nov 25 10:39:54 crc kubenswrapper[4769]: I1125 10:39:54.259399 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="37a68b26-59dc-4bde-b3a6-37a68f0d1ae5" path="/var/lib/kubelet/pods/37a68b26-59dc-4bde-b3a6-37a68f0d1ae5/volumes" Nov 25 10:41:22 crc kubenswrapper[4769]: I1125 10:41:22.290239 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:41:22 crc kubenswrapper[4769]: I1125 10:41:22.290858 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:41:52 crc kubenswrapper[4769]: I1125 10:41:52.290632 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:41:52 crc kubenswrapper[4769]: I1125 10:41:52.291219 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:42:25 crc kubenswrapper[4769]: I1125 10:42:25.929663 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-657bc78fc7-ccwr8" podUID="5aee4062-7d9d-44f0-a07c-6f0704946803" containerName="proxy-server" probeResult="failure" output="Get \"https://10.217.0.209:8080/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 10:42:25 crc kubenswrapper[4769]: I1125 10:42:25.952316 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/openstack-operator-controller-manager-6c6f9bd7cc-cp48g" podUID="70f81d0a-db58-4bd4-a0e2-ee1c03e2f923" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.126:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 10:42:25 crc kubenswrapper[4769]: I1125 
10:42:25.952671 4769 patch_prober.go:28] interesting pod/dns-default-hn7p6 container/dns namespace/openshift-dns: Readiness probe status=failure output="Get \"http://10.217.0.43:8181/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 10:42:25 crc kubenswrapper[4769]: I1125 10:42:25.952695 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-dns/dns-default-hn7p6" podUID="d8add56f-d9a7-4a9a-8f68-b7900166c27f" containerName="dns" probeResult="failure" output="Get \"http://10.217.0.43:8181/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 10:42:25 crc kubenswrapper[4769]: I1125 10:42:25.952814 4769 patch_prober.go:28] interesting pod/logging-loki-distributor-76cc67bf56-g29kd container/loki-distributor namespace/openshift-logging: Liveness probe status=failure output="Get \"https://10.217.0.75:3101/loki/api/v1/status/buildinfo\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 10:42:25 crc kubenswrapper[4769]: I1125 10:42:25.952837 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-logging/logging-loki-distributor-76cc67bf56-g29kd" podUID="163db859-0d28-48d8-b06a-f6a94e19479d" containerName="loki-distributor" probeResult="failure" output="Get \"https://10.217.0.75:3101/loki/api/v1/status/buildinfo\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 10:42:25 crc kubenswrapper[4769]: I1125 10:42:25.953003 4769 patch_prober.go:28] interesting pod/logging-loki-distributor-76cc67bf56-g29kd container/loki-distributor namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.75:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 10:42:25 crc kubenswrapper[4769]: I1125 10:42:25.953025 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-distributor-76cc67bf56-g29kd" podUID="163db859-0d28-48d8-b06a-f6a94e19479d" containerName="loki-distributor" probeResult="failure" output="Get \"https://10.217.0.75:3101/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 25 10:42:25 crc kubenswrapper[4769]: I1125 10:42:25.953179 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="hostpath-provisioner/csi-hostpathplugin-sx97n" podUID="86a2de7a-5314-47ef-b827-92050023a677" containerName="hostpath-provisioner" probeResult="failure" output="Get \"http://10.217.0.37:9898/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 10:42:25 crc kubenswrapper[4769]: I1125 10:42:25.953331 4769 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-wn2f7 container/marketplace-operator namespace/openshift-marketplace: Liveness probe status=failure output="Get \"http://10.217.0.55:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 10:42:25 crc kubenswrapper[4769]: I1125 10:42:25.953348 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/marketplace-operator-79b997595-wn2f7" podUID="2ef0d2ad-687b-4157-8ab5-803122670e19" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.55:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 10:42:25 crc kubenswrapper[4769]: 
I1125 10:42:25.953552 4769 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-wn2f7 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.55:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 10:42:25 crc kubenswrapper[4769]: I1125 10:42:25.953567 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-wn2f7" podUID="2ef0d2ad-687b-4157-8ab5-803122670e19" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.55:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 10:42:25 crc kubenswrapper[4769]: I1125 10:42:25.953712 4769 patch_prober.go:28] interesting pod/logging-loki-querier-5895d59bb8-tbhrj container/loki-querier namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.76:3101/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 10:42:25 crc kubenswrapper[4769]: I1125 10:42:25.953743 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-querier-5895d59bb8-tbhrj" podUID="6091a51e-9c46-48cd-bb3a-ff1f3c9aa965" containerName="loki-querier" probeResult="failure" output="Get \"https://10.217.0.76:3101/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 10:42:25 crc kubenswrapper[4769]: I1125 10:42:25.953891 4769 patch_prober.go:28] interesting pod/logging-loki-querier-5895d59bb8-tbhrj container/loki-querier namespace/openshift-logging: Liveness probe status=failure output="Get \"https://10.217.0.76:3101/loki/api/v1/status/buildinfo\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 10:42:25 crc kubenswrapper[4769]: I1125 10:42:25.953917 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-logging/logging-loki-querier-5895d59bb8-tbhrj" podUID="6091a51e-9c46-48cd-bb3a-ff1f3c9aa965" containerName="loki-querier" probeResult="failure" output="Get \"https://10.217.0.76:3101/loki/api/v1/status/buildinfo\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 10:42:25 crc kubenswrapper[4769]: I1125 10:42:25.954073 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-657bc78fc7-ccwr8" podUID="5aee4062-7d9d-44f0-a07c-6f0704946803" containerName="proxy-server" probeResult="failure" output="Get \"https://10.217.0.209:8080/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 10:42:25 crc kubenswrapper[4769]: I1125 10:42:25.954213 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-657bc78fc7-ccwr8" podUID="5aee4062-7d9d-44f0-a07c-6f0704946803" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.209:8080/healthcheck\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 25 10:42:25 crc kubenswrapper[4769]: I1125 10:42:25.954356 4769 patch_prober.go:28] interesting pod/logging-loki-query-frontend-84558f7c9f-vdrhz container/loki-query-frontend namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.77:3101/loki/api/v1/status/buildinfo\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting 
headers)" start-of-body= Nov 25 10:42:25 crc kubenswrapper[4769]: I1125 10:42:25.954377 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-vdrhz" podUID="8f616f7b-7627-4878-9279-6d12b8ac3bb7" containerName="loki-query-frontend" probeResult="failure" output="Get \"https://10.217.0.77:3101/loki/api/v1/status/buildinfo\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 25 10:42:25 crc kubenswrapper[4769]: I1125 10:42:25.954516 4769 patch_prober.go:28] interesting pod/logging-loki-query-frontend-84558f7c9f-vdrhz container/loki-query-frontend namespace/openshift-logging: Liveness probe status=failure output="Get \"https://10.217.0.77:3101/loki/api/v1/status/buildinfo\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 10:42:25 crc kubenswrapper[4769]: I1125 10:42:25.954541 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-vdrhz" podUID="8f616f7b-7627-4878-9279-6d12b8ac3bb7" containerName="loki-query-frontend" probeResult="failure" output="Get \"https://10.217.0.77:3101/loki/api/v1/status/buildinfo\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 25 10:42:25 crc kubenswrapper[4769]: I1125 10:42:25.954671 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="87066e63-7bf0-47dd-a601-88c880a1b5e4" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.206:8776/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 10:42:25 crc kubenswrapper[4769]: I1125 10:42:25.954801 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/prometheus-metric-storage-0" podUID="8ef4c37f-e3dc-4994-92bf-b41e7c215ef1" containerName="prometheus" probeResult="failure" output="Get \"https://10.217.0.175:9090/-/healthy\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 25 10:42:25 crc kubenswrapper[4769]: I1125 10:42:25.954950 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/prometheus-metric-storage-0" podUID="8ef4c37f-e3dc-4994-92bf-b41e7c215ef1" containerName="prometheus" probeResult="failure" output="Get \"https://10.217.0.175:9090/-/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 10:42:25 crc kubenswrapper[4769]: I1125 10:42:25.955098 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/speaker-84t8t" podUID="ce9a407b-6a62-48d8-a15d-e08c1b09c3e3" containerName="speaker" probeResult="failure" output="Get \"http://localhost:29150/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 10:42:25 crc kubenswrapper[4769]: I1125 10:42:25.955247 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/speaker-84t8t" podUID="ce9a407b-6a62-48d8-a15d-e08c1b09c3e3" containerName="speaker" probeResult="failure" output="Get \"http://localhost:29150/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 10:42:25 crc kubenswrapper[4769]: I1125 10:42:25.978890 4769 patch_prober.go:28] interesting pod/observability-operator-d8bb48f5d-l8nzd container/operator namespace/openshift-operators: Readiness probe status=failure output="Get 
\"http://10.217.0.44:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 10:42:25 crc kubenswrapper[4769]: I1125 10:42:25.978943 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators/observability-operator-d8bb48f5d-l8nzd" podUID="75c93e2f-793a-49b1-bd51-73adf5f2edaf" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.44:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 10:42:25 crc kubenswrapper[4769]: I1125 10:42:25.999501 4769 patch_prober.go:28] interesting pod/observability-operator-d8bb48f5d-l8nzd container/operator namespace/openshift-operators: Liveness probe status=failure output="Get \"http://10.217.0.44:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 10:42:25 crc kubenswrapper[4769]: I1125 10:42:25.999544 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operators/observability-operator-d8bb48f5d-l8nzd" podUID="75c93e2f-793a-49b1-bd51-73adf5f2edaf" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.44:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 10:42:26 crc kubenswrapper[4769]: I1125 10:42:26.016069 4769 patch_prober.go:28] interesting pod/logging-loki-compactor-0 container/loki-compactor namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.81:3101/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 10:42:26 crc kubenswrapper[4769]: I1125 10:42:26.016140 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-compactor-0" podUID="a4405372-5512-4a21-9e58-569bdcd4389c" containerName="loki-compactor" probeResult="failure" output="Get \"https://10.217.0.81:3101/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 10:42:26 crc kubenswrapper[4769]: I1125 10:42:26.024242 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/metallb-operator-webhook-server-55c9569f76-llhqr" podUID="395976a4-ddee-425d-994f-c913076f1710" containerName="webhook-server" probeResult="failure" output="Get \"http://10.217.0.98:7472/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 10:42:26 crc kubenswrapper[4769]: I1125 10:42:26.025184 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-scheduler-0" podUID="18a4a142-bb1b-4e44-9110-6a6e15b86b0d" containerName="cinder-scheduler" probeResult="failure" output="Get \"http://10.217.0.207:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 10:42:26 crc kubenswrapper[4769]: I1125 10:42:26.025371 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-657bc78fc7-ccwr8" podUID="5aee4062-7d9d-44f0-a07c-6f0704946803" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.209:8080/healthcheck\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 25 10:42:26 crc kubenswrapper[4769]: I1125 10:42:26.025420 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-controller-operator-7fb4d7987d-w6ztr" podUID="6520a852-60ef-47d1-800b-633eae1655dd" containerName="operator" probeResult="failure" 
output="Get \"http://10.217.0.104:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 10:42:26 crc kubenswrapper[4769]: I1125 10:42:26.025449 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-api-0" podUID="87066e63-7bf0-47dd-a601-88c880a1b5e4" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.206:8776/healthcheck\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 25 10:42:26 crc kubenswrapper[4769]: I1125 10:42:26.025477 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/frr-k8s-webhook-server-6998585d5-v89x9" podUID="8df830ab-ab7a-49cd-b7d4-72d44c99cc4f" containerName="frr-k8s-webhook-server" probeResult="failure" output="Get \"http://10.217.0.99:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 10:42:26 crc kubenswrapper[4769]: I1125 10:42:26.025502 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-webhook-server-6998585d5-v89x9" podUID="8df830ab-ab7a-49cd-b7d4-72d44c99cc4f" containerName="frr-k8s-webhook-server" probeResult="failure" output="Get \"http://10.217.0.99:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 10:42:26 crc kubenswrapper[4769]: I1125 10:42:26.025526 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="7c17c0cb-e73b-466d-8fae-ad581561fcb0" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.1.4:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 10:42:26 crc kubenswrapper[4769]: I1125 10:42:26.025551 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="7c17c0cb-e73b-466d-8fae-ad581561fcb0" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.1.4:8775/\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 25 10:42:26 crc kubenswrapper[4769]: I1125 10:42:26.025575 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-controller-manager-6c6f9bd7cc-cp48g" podUID="70f81d0a-db58-4bd4-a0e2-ee1c03e2f923" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.126:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 10:42:26 crc kubenswrapper[4769]: I1125 10:42:26.025687 4769 patch_prober.go:28] interesting pod/logging-loki-gateway-5bdd8fd454-klwtc container/opa namespace/openshift-logging: Liveness probe status=failure output="Get \"https://10.217.0.79:8083/live\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 10:42:26 crc kubenswrapper[4769]: I1125 10:42:26.025710 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-logging/logging-loki-gateway-5bdd8fd454-klwtc" podUID="f535e254-5602-4794-9f47-e9bb2c1454b2" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.79:8083/live\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 10:42:26 crc kubenswrapper[4769]: I1125 10:42:26.025736 4769 patch_prober.go:28] interesting pod/logging-loki-gateway-5bdd8fd454-8hzw5 container/opa namespace/openshift-logging: Readiness probe status=failure output="Get 
\"https://10.217.0.78:8083/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 10:42:26 crc kubenswrapper[4769]: I1125 10:42:26.025750 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-5bdd8fd454-8hzw5" podUID="daa191b3-4057-42fd-8c0c-d0aa065af77b" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.78:8083/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 25 10:42:26 crc kubenswrapper[4769]: I1125 10:42:26.025773 4769 patch_prober.go:28] interesting pod/logging-loki-gateway-5bdd8fd454-8hzw5 container/opa namespace/openshift-logging: Liveness probe status=failure output="Get \"https://10.217.0.78:8083/live\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 10:42:26 crc kubenswrapper[4769]: I1125 10:42:26.025793 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-logging/logging-loki-gateway-5bdd8fd454-8hzw5" podUID="daa191b3-4057-42fd-8c0c-d0aa065af77b" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.78:8083/live\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 10:42:26 crc kubenswrapper[4769]: I1125 10:42:26.025815 4769 patch_prober.go:28] interesting pod/logging-loki-gateway-5bdd8fd454-8hzw5 container/gateway namespace/openshift-logging: Liveness probe status=failure output="Get \"https://10.217.0.78:8081/live\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 10:42:26 crc kubenswrapper[4769]: I1125 10:42:26.025829 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-logging/logging-loki-gateway-5bdd8fd454-8hzw5" podUID="daa191b3-4057-42fd-8c0c-d0aa065af77b" containerName="gateway" probeResult="failure" output="Get \"https://10.217.0.78:8081/live\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 25 10:42:26 crc kubenswrapper[4769]: I1125 10:42:26.025849 4769 patch_prober.go:28] interesting pod/logging-loki-gateway-5bdd8fd454-klwtc container/gateway namespace/openshift-logging: Liveness probe status=failure output="Get \"https://10.217.0.79:8081/live\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 10:42:26 crc kubenswrapper[4769]: I1125 10:42:26.025860 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-logging/logging-loki-gateway-5bdd8fd454-klwtc" podUID="f535e254-5602-4794-9f47-e9bb2c1454b2" containerName="gateway" probeResult="failure" output="Get \"https://10.217.0.79:8081/live\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 10:42:26 crc kubenswrapper[4769]: I1125 10:42:26.025882 4769 patch_prober.go:28] interesting pod/logging-loki-gateway-5bdd8fd454-klwtc container/opa namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.79:8083/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 10:42:26 crc kubenswrapper[4769]: I1125 10:42:26.025895 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-5bdd8fd454-klwtc" podUID="f535e254-5602-4794-9f47-e9bb2c1454b2" containerName="opa" probeResult="failure" 
output="Get \"https://10.217.0.79:8083/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 25 10:42:26 crc kubenswrapper[4769]: I1125 10:42:26.025916 4769 patch_prober.go:28] interesting pod/logging-loki-gateway-5bdd8fd454-8hzw5 container/gateway namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.78:8081/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 10:42:26 crc kubenswrapper[4769]: I1125 10:42:26.025928 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-5bdd8fd454-8hzw5" podUID="daa191b3-4057-42fd-8c0c-d0aa065af77b" containerName="gateway" probeResult="failure" output="Get \"https://10.217.0.78:8081/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 10:42:26 crc kubenswrapper[4769]: I1125 10:42:26.025952 4769 patch_prober.go:28] interesting pod/logging-loki-gateway-5bdd8fd454-klwtc container/gateway namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.79:8081/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 10:42:26 crc kubenswrapper[4769]: I1125 10:42:26.025979 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-5bdd8fd454-klwtc" podUID="f535e254-5602-4794-9f47-e9bb2c1454b2" containerName="gateway" probeResult="failure" output="Get \"https://10.217.0.79:8081/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 10:42:26 crc kubenswrapper[4769]: I1125 10:42:26.026057 4769 patch_prober.go:28] interesting pod/console-859655bd7d-v8qvf container/console namespace/openshift-console: Readiness probe status=failure output="Get \"https://10.217.0.139:8443/health\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 10:42:26 crc kubenswrapper[4769]: I1125 10:42:26.026072 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/console-859655bd7d-v8qvf" podUID="18f2ba17-1349-4c25-9585-d1f119a6988b" containerName="console" probeResult="failure" output="Get \"https://10.217.0.139:8443/health\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 10:42:26 crc kubenswrapper[4769]: I1125 10:42:26.026100 4769 patch_prober.go:28] interesting pod/logging-loki-compactor-0 container/loki-compactor namespace/openshift-logging: Liveness probe status=failure output="Get \"https://10.217.0.81:3101/loki/api/v1/status/buildinfo\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 10:42:26 crc kubenswrapper[4769]: I1125 10:42:26.026115 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-logging/logging-loki-compactor-0" podUID="a4405372-5512-4a21-9e58-569bdcd4389c" containerName="loki-compactor" probeResult="failure" output="Get \"https://10.217.0.81:3101/loki/api/v1/status/buildinfo\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 25 10:42:26 crc kubenswrapper[4769]: I1125 10:42:26.026488 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:42:26 crc kubenswrapper[4769]: I1125 10:42:26.026525 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:42:26 crc kubenswrapper[4769]: I1125 10:42:26.080321 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" Nov 25 10:42:26 crc kubenswrapper[4769]: I1125 10:42:26.081696 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2dda2b74a193e53366972e7a46dc5b774e7871f50dd86ba2a2329a05e9765b6f"} pod="openshift-machine-config-operator/machine-config-daemon-98mzt" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 10:42:26 crc kubenswrapper[4769]: I1125 10:42:26.084370 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" containerID="cri-o://2dda2b74a193e53366972e7a46dc5b774e7871f50dd86ba2a2329a05e9765b6f" gracePeriod=600 Nov 25 10:42:27 crc kubenswrapper[4769]: I1125 10:42:27.146934 4769 generic.go:334] "Generic (PLEG): container finished" podID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerID="2dda2b74a193e53366972e7a46dc5b774e7871f50dd86ba2a2329a05e9765b6f" exitCode=0 Nov 25 10:42:27 crc kubenswrapper[4769]: I1125 10:42:27.147153 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" event={"ID":"d58c71b5-5dc4-45c1-9b58-9740a35d2256","Type":"ContainerDied","Data":"2dda2b74a193e53366972e7a46dc5b774e7871f50dd86ba2a2329a05e9765b6f"} Nov 25 10:42:27 crc kubenswrapper[4769]: I1125 10:42:27.147758 4769 scope.go:117] "RemoveContainer" containerID="ca58e4115a8ae8db9ff23948bf8aee779f500aa2408587bba8cdb927db89acf0" Nov 25 10:42:28 crc kubenswrapper[4769]: I1125 10:42:28.165365 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" event={"ID":"d58c71b5-5dc4-45c1-9b58-9740a35d2256","Type":"ContainerStarted","Data":"72a4bb4929a46b44faab0b605d8ae573c21befda60694925e13067caa64d77a2"} Nov 25 10:43:40 crc kubenswrapper[4769]: I1125 10:43:40.657991 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-f76hc"] Nov 25 10:43:40 crc kubenswrapper[4769]: E1125 10:43:40.658890 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37a68b26-59dc-4bde-b3a6-37a68f0d1ae5" containerName="registry-server" Nov 25 10:43:40 crc kubenswrapper[4769]: I1125 10:43:40.658903 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="37a68b26-59dc-4bde-b3a6-37a68f0d1ae5" containerName="registry-server" Nov 25 10:43:40 crc kubenswrapper[4769]: E1125 10:43:40.658924 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37a68b26-59dc-4bde-b3a6-37a68f0d1ae5" containerName="extract-utilities" Nov 25 10:43:40 crc kubenswrapper[4769]: I1125 10:43:40.658931 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="37a68b26-59dc-4bde-b3a6-37a68f0d1ae5" 
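Every timeout above ends in "(Client.Timeout exceeded while awaiting headers)", which is the error text Go's net/http client produces when its Timeout fires before response headers arrive; the kubelet's HTTP probes go through such a client with the probe's timeoutSeconds as the budget. A minimal sketch of that failure mode (the URL is one of the probe targets above, reused purely as a placeholder, and the 1-second budget is an assumption):

    package main

    import (
        "fmt"
        "net/http"
        "time"
    )

    func main() {
        // Probe-style GET with a hard 1s budget. If the server accepts the
        // connection but stalls before sending headers, err.Error() ends in
        // "(Client.Timeout exceeded while awaiting headers)", matching the
        // probe output logged above.
        client := &http.Client{Timeout: 1 * time.Second}
        resp, err := client.Get("http://10.217.0.55:8080/healthz")
        if err != nil {
            fmt.Println("probe failed:", err)
            return
        }
        resp.Body.Close()
        fmt.Println("probe status:", resp.StatusCode)
    }

The machine-config-daemon failure is the other mode: "connect: connection refused" means nothing was listening at all, so the liveness probe fails and the kubelet kills and restarts the container (the gracePeriod=600 presumably reflects the pod's configured termination grace period).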
containerName="extract-utilities" Nov 25 10:43:40 crc kubenswrapper[4769]: E1125 10:43:40.658956 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37a68b26-59dc-4bde-b3a6-37a68f0d1ae5" containerName="extract-content" Nov 25 10:43:40 crc kubenswrapper[4769]: I1125 10:43:40.658975 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="37a68b26-59dc-4bde-b3a6-37a68f0d1ae5" containerName="extract-content" Nov 25 10:43:40 crc kubenswrapper[4769]: I1125 10:43:40.659210 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="37a68b26-59dc-4bde-b3a6-37a68f0d1ae5" containerName="registry-server" Nov 25 10:43:40 crc kubenswrapper[4769]: I1125 10:43:40.660890 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-f76hc" Nov 25 10:43:40 crc kubenswrapper[4769]: I1125 10:43:40.672493 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-f76hc"] Nov 25 10:43:40 crc kubenswrapper[4769]: I1125 10:43:40.739642 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dmhb2\" (UniqueName: \"kubernetes.io/projected/94f049f7-9223-4ae6-adb7-d2ce70808532-kube-api-access-dmhb2\") pod \"community-operators-f76hc\" (UID: \"94f049f7-9223-4ae6-adb7-d2ce70808532\") " pod="openshift-marketplace/community-operators-f76hc" Nov 25 10:43:40 crc kubenswrapper[4769]: I1125 10:43:40.739699 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94f049f7-9223-4ae6-adb7-d2ce70808532-utilities\") pod \"community-operators-f76hc\" (UID: \"94f049f7-9223-4ae6-adb7-d2ce70808532\") " pod="openshift-marketplace/community-operators-f76hc" Nov 25 10:43:40 crc kubenswrapper[4769]: I1125 10:43:40.739771 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94f049f7-9223-4ae6-adb7-d2ce70808532-catalog-content\") pod \"community-operators-f76hc\" (UID: \"94f049f7-9223-4ae6-adb7-d2ce70808532\") " pod="openshift-marketplace/community-operators-f76hc" Nov 25 10:43:40 crc kubenswrapper[4769]: I1125 10:43:40.843367 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dmhb2\" (UniqueName: \"kubernetes.io/projected/94f049f7-9223-4ae6-adb7-d2ce70808532-kube-api-access-dmhb2\") pod \"community-operators-f76hc\" (UID: \"94f049f7-9223-4ae6-adb7-d2ce70808532\") " pod="openshift-marketplace/community-operators-f76hc" Nov 25 10:43:40 crc kubenswrapper[4769]: I1125 10:43:40.843421 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94f049f7-9223-4ae6-adb7-d2ce70808532-utilities\") pod \"community-operators-f76hc\" (UID: \"94f049f7-9223-4ae6-adb7-d2ce70808532\") " pod="openshift-marketplace/community-operators-f76hc" Nov 25 10:43:40 crc kubenswrapper[4769]: I1125 10:43:40.843473 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94f049f7-9223-4ae6-adb7-d2ce70808532-catalog-content\") pod \"community-operators-f76hc\" (UID: \"94f049f7-9223-4ae6-adb7-d2ce70808532\") " pod="openshift-marketplace/community-operators-f76hc" Nov 25 10:43:40 crc kubenswrapper[4769]: I1125 10:43:40.844116 4769 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94f049f7-9223-4ae6-adb7-d2ce70808532-catalog-content\") pod \"community-operators-f76hc\" (UID: \"94f049f7-9223-4ae6-adb7-d2ce70808532\") " pod="openshift-marketplace/community-operators-f76hc" Nov 25 10:43:40 crc kubenswrapper[4769]: I1125 10:43:40.844221 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94f049f7-9223-4ae6-adb7-d2ce70808532-utilities\") pod \"community-operators-f76hc\" (UID: \"94f049f7-9223-4ae6-adb7-d2ce70808532\") " pod="openshift-marketplace/community-operators-f76hc" Nov 25 10:43:40 crc kubenswrapper[4769]: I1125 10:43:40.869775 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dmhb2\" (UniqueName: \"kubernetes.io/projected/94f049f7-9223-4ae6-adb7-d2ce70808532-kube-api-access-dmhb2\") pod \"community-operators-f76hc\" (UID: \"94f049f7-9223-4ae6-adb7-d2ce70808532\") " pod="openshift-marketplace/community-operators-f76hc" Nov 25 10:43:40 crc kubenswrapper[4769]: I1125 10:43:40.984504 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-f76hc" Nov 25 10:43:41 crc kubenswrapper[4769]: I1125 10:43:41.587333 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-f76hc"] Nov 25 10:43:42 crc kubenswrapper[4769]: I1125 10:43:42.185871 4769 generic.go:334] "Generic (PLEG): container finished" podID="94f049f7-9223-4ae6-adb7-d2ce70808532" containerID="051eea45af87ed252cc8e02aba2679f978e40cdc847aeba15c79366df8cf69e8" exitCode=0 Nov 25 10:43:42 crc kubenswrapper[4769]: I1125 10:43:42.187876 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f76hc" event={"ID":"94f049f7-9223-4ae6-adb7-d2ce70808532","Type":"ContainerDied","Data":"051eea45af87ed252cc8e02aba2679f978e40cdc847aeba15c79366df8cf69e8"} Nov 25 10:43:42 crc kubenswrapper[4769]: I1125 10:43:42.187911 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f76hc" event={"ID":"94f049f7-9223-4ae6-adb7-d2ce70808532","Type":"ContainerStarted","Data":"92bc1a56def4a94b59e898639971724d515a96a638f4e5c8f53b989ee18b8b80"} Nov 25 10:43:42 crc kubenswrapper[4769]: I1125 10:43:42.189858 4769 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 10:43:44 crc kubenswrapper[4769]: I1125 10:43:44.216238 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f76hc" event={"ID":"94f049f7-9223-4ae6-adb7-d2ce70808532","Type":"ContainerStarted","Data":"b8749b11a4c6666f7236638e66510f80fa717f8728851571f897983339662f26"} Nov 25 10:43:45 crc kubenswrapper[4769]: I1125 10:43:45.232134 4769 generic.go:334] "Generic (PLEG): container finished" podID="94f049f7-9223-4ae6-adb7-d2ce70808532" containerID="b8749b11a4c6666f7236638e66510f80fa717f8728851571f897983339662f26" exitCode=0 Nov 25 10:43:45 crc kubenswrapper[4769]: I1125 10:43:45.232415 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f76hc" event={"ID":"94f049f7-9223-4ae6-adb7-d2ce70808532","Type":"ContainerDied","Data":"b8749b11a4c6666f7236638e66510f80fa717f8728851571f897983339662f26"} Nov 25 10:43:45 crc kubenswrapper[4769]: I1125 10:43:45.880900 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-l28dx"] Nov 
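Each catalog pod in this log follows the same volume pattern: two emptyDir scratch volumes ("utilities" and "catalog-content") plus a projected service-account token volume ("kube-api-access-*"), and for each one the reconciler logs VerifyControllerAttachedVolume, then MountVolume, then MountVolume.SetUp succeeded. A sketch of the emptyDir declarations using the k8s.io/api/core/v1 types (field names per that package; the snippet only mirrors the volume names seen in the log):

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
    )

    func main() {
        // emptyDir volumes matching the "utilities" and "catalog-content"
        // mounts above; the kube-api-access-* projected token volume is
        // injected automatically, so it is not declared here.
        vols := []corev1.Volume{
            {Name: "utilities", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}},
            {Name: "catalog-content", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}},
        }
        for _, v := range vols {
            fmt.Println("declared volume:", v.Name)
        }
    }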
Nov 25 10:43:40 crc kubenswrapper[4769]: I1125 10:43:40.984504 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-f76hc"
Nov 25 10:43:41 crc kubenswrapper[4769]: I1125 10:43:41.587333 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-f76hc"]
Nov 25 10:43:42 crc kubenswrapper[4769]: I1125 10:43:42.185871 4769 generic.go:334] "Generic (PLEG): container finished" podID="94f049f7-9223-4ae6-adb7-d2ce70808532" containerID="051eea45af87ed252cc8e02aba2679f978e40cdc847aeba15c79366df8cf69e8" exitCode=0
Nov 25 10:43:42 crc kubenswrapper[4769]: I1125 10:43:42.187876 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f76hc" event={"ID":"94f049f7-9223-4ae6-adb7-d2ce70808532","Type":"ContainerDied","Data":"051eea45af87ed252cc8e02aba2679f978e40cdc847aeba15c79366df8cf69e8"}
Nov 25 10:43:42 crc kubenswrapper[4769]: I1125 10:43:42.187911 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f76hc" event={"ID":"94f049f7-9223-4ae6-adb7-d2ce70808532","Type":"ContainerStarted","Data":"92bc1a56def4a94b59e898639971724d515a96a638f4e5c8f53b989ee18b8b80"}
Nov 25 10:43:42 crc kubenswrapper[4769]: I1125 10:43:42.189858 4769 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 25 10:43:44 crc kubenswrapper[4769]: I1125 10:43:44.216238 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f76hc" event={"ID":"94f049f7-9223-4ae6-adb7-d2ce70808532","Type":"ContainerStarted","Data":"b8749b11a4c6666f7236638e66510f80fa717f8728851571f897983339662f26"}
Nov 25 10:43:45 crc kubenswrapper[4769]: I1125 10:43:45.232134 4769 generic.go:334] "Generic (PLEG): container finished" podID="94f049f7-9223-4ae6-adb7-d2ce70808532" containerID="b8749b11a4c6666f7236638e66510f80fa717f8728851571f897983339662f26" exitCode=0
Nov 25 10:43:45 crc kubenswrapper[4769]: I1125 10:43:45.232415 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f76hc" event={"ID":"94f049f7-9223-4ae6-adb7-d2ce70808532","Type":"ContainerDied","Data":"b8749b11a4c6666f7236638e66510f80fa717f8728851571f897983339662f26"}
Nov 25 10:43:45 crc kubenswrapper[4769]: I1125 10:43:45.880900 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-l28dx"]
Nov 25 10:43:45 crc kubenswrapper[4769]: I1125 10:43:45.883617 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-l28dx"
Nov 25 10:43:45 crc kubenswrapper[4769]: I1125 10:43:45.900371 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-l28dx"]
Nov 25 10:43:45 crc kubenswrapper[4769]: I1125 10:43:45.984295 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe774271-9b3e-42b7-ae1d-fbb70cb6ec91-utilities\") pod \"redhat-operators-l28dx\" (UID: \"fe774271-9b3e-42b7-ae1d-fbb70cb6ec91\") " pod="openshift-marketplace/redhat-operators-l28dx"
Nov 25 10:43:45 crc kubenswrapper[4769]: I1125 10:43:45.984669 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe774271-9b3e-42b7-ae1d-fbb70cb6ec91-catalog-content\") pod \"redhat-operators-l28dx\" (UID: \"fe774271-9b3e-42b7-ae1d-fbb70cb6ec91\") " pod="openshift-marketplace/redhat-operators-l28dx"
Nov 25 10:43:45 crc kubenswrapper[4769]: I1125 10:43:45.984803 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-psldb\" (UniqueName: \"kubernetes.io/projected/fe774271-9b3e-42b7-ae1d-fbb70cb6ec91-kube-api-access-psldb\") pod \"redhat-operators-l28dx\" (UID: \"fe774271-9b3e-42b7-ae1d-fbb70cb6ec91\") " pod="openshift-marketplace/redhat-operators-l28dx"
Nov 25 10:43:46 crc kubenswrapper[4769]: I1125 10:43:46.089086 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe774271-9b3e-42b7-ae1d-fbb70cb6ec91-catalog-content\") pod \"redhat-operators-l28dx\" (UID: \"fe774271-9b3e-42b7-ae1d-fbb70cb6ec91\") " pod="openshift-marketplace/redhat-operators-l28dx"
Nov 25 10:43:46 crc kubenswrapper[4769]: I1125 10:43:46.090223 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-psldb\" (UniqueName: \"kubernetes.io/projected/fe774271-9b3e-42b7-ae1d-fbb70cb6ec91-kube-api-access-psldb\") pod \"redhat-operators-l28dx\" (UID: \"fe774271-9b3e-42b7-ae1d-fbb70cb6ec91\") " pod="openshift-marketplace/redhat-operators-l28dx"
Nov 25 10:43:46 crc kubenswrapper[4769]: I1125 10:43:46.091284 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe774271-9b3e-42b7-ae1d-fbb70cb6ec91-utilities\") pod \"redhat-operators-l28dx\" (UID: \"fe774271-9b3e-42b7-ae1d-fbb70cb6ec91\") " pod="openshift-marketplace/redhat-operators-l28dx"
Nov 25 10:43:46 crc kubenswrapper[4769]: I1125 10:43:46.093329 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe774271-9b3e-42b7-ae1d-fbb70cb6ec91-catalog-content\") pod \"redhat-operators-l28dx\" (UID: \"fe774271-9b3e-42b7-ae1d-fbb70cb6ec91\") " pod="openshift-marketplace/redhat-operators-l28dx"
Nov 25 10:43:46 crc kubenswrapper[4769]: I1125 10:43:46.093365 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe774271-9b3e-42b7-ae1d-fbb70cb6ec91-utilities\") pod \"redhat-operators-l28dx\" (UID: \"fe774271-9b3e-42b7-ae1d-fbb70cb6ec91\") " pod="openshift-marketplace/redhat-operators-l28dx"
Nov 25 10:43:46 crc kubenswrapper[4769]: I1125 10:43:46.125607 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-psldb\" (UniqueName: \"kubernetes.io/projected/fe774271-9b3e-42b7-ae1d-fbb70cb6ec91-kube-api-access-psldb\") pod \"redhat-operators-l28dx\" (UID: \"fe774271-9b3e-42b7-ae1d-fbb70cb6ec91\") " pod="openshift-marketplace/redhat-operators-l28dx"
Nov 25 10:43:46 crc kubenswrapper[4769]: I1125 10:43:46.237718 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-l28dx"
Nov 25 10:43:46 crc kubenswrapper[4769]: I1125 10:43:46.262825 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f76hc" event={"ID":"94f049f7-9223-4ae6-adb7-d2ce70808532","Type":"ContainerStarted","Data":"ce65c7aa9d31d2aafb32b20589ae35a983bbd6a83b86e1f960ae83142f66b55d"}
Nov 25 10:43:46 crc kubenswrapper[4769]: I1125 10:43:46.297443 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-f76hc" podStartSLOduration=2.70130534 podStartE2EDuration="6.297423285s" podCreationTimestamp="2025-11-25 10:43:40 +0000 UTC" firstStartedPulling="2025-11-25 10:43:42.189576229 +0000 UTC m=+3570.774548542" lastFinishedPulling="2025-11-25 10:43:45.785694134 +0000 UTC m=+3574.370666487" observedRunningTime="2025-11-25 10:43:46.290904543 +0000 UTC m=+3574.875876866" watchObservedRunningTime="2025-11-25 10:43:46.297423285 +0000 UTC m=+3574.882395598"
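The pod_startup_latency_tracker entry above encodes a small calculation worth making explicit: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp (10:43:46.297423285 - 10:43:40 = 6.297423285s), and podStartSLOduration subtracts the image-pull window, lastFinishedPulling - firstStartedPulling. Reproducing it with the m=+ monotonic offsets from that entry:

    package main

    import "fmt"

    func main() {
        // Values copied from the pod_startup_latency_tracker line for
        // community-operators-f76hc (all in seconds).
        const (
            e2e                 = 6.297423285    // watchObservedRunningTime - podCreationTimestamp
            firstStartedPulling = 3570.774548542 // m=+ offset
            lastFinishedPulling = 3574.370666487 // m=+ offset
        )
        // The SLO duration excludes time spent pulling images.
        slo := e2e - (lastFinishedPulling - firstStartedPulling)
        fmt.Printf("podStartSLOduration=%.9f\n", slo) // ~2.701305340, as logged
    }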
Nov 25 10:43:46 crc kubenswrapper[4769]: I1125 10:43:46.796003 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-l28dx"]
Nov 25 10:43:47 crc kubenswrapper[4769]: I1125 10:43:47.275080 4769 generic.go:334] "Generic (PLEG): container finished" podID="fe774271-9b3e-42b7-ae1d-fbb70cb6ec91" containerID="e75417a1d2dbd9077d526124cd6b104e98c2438812872f28b60f4c2ef935d9f2" exitCode=0
Nov 25 10:43:47 crc kubenswrapper[4769]: I1125 10:43:47.275155 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l28dx" event={"ID":"fe774271-9b3e-42b7-ae1d-fbb70cb6ec91","Type":"ContainerDied","Data":"e75417a1d2dbd9077d526124cd6b104e98c2438812872f28b60f4c2ef935d9f2"}
Nov 25 10:43:47 crc kubenswrapper[4769]: I1125 10:43:47.275468 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l28dx" event={"ID":"fe774271-9b3e-42b7-ae1d-fbb70cb6ec91","Type":"ContainerStarted","Data":"5988cda84e8336d09ad95010be91fdd759de67e036060b0372d7fb0f1d70aedd"}
Nov 25 10:43:48 crc kubenswrapper[4769]: I1125 10:43:48.293142 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l28dx" event={"ID":"fe774271-9b3e-42b7-ae1d-fbb70cb6ec91","Type":"ContainerStarted","Data":"5b990715ada23878c43bcbe5cbc733073dab9a655d7d5b2b8f328de6f1a698bb"}
Nov 25 10:43:50 crc kubenswrapper[4769]: I1125 10:43:50.986216 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-f76hc"
Nov 25 10:43:50 crc kubenswrapper[4769]: I1125 10:43:50.986649 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-f76hc"
Nov 25 10:43:51 crc kubenswrapper[4769]: I1125 10:43:51.045076 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-f76hc"
Nov 25 10:43:51 crc kubenswrapper[4769]: I1125 10:43:51.357408 4769 generic.go:334] "Generic (PLEG): container finished" podID="fe774271-9b3e-42b7-ae1d-fbb70cb6ec91" containerID="5b990715ada23878c43bcbe5cbc733073dab9a655d7d5b2b8f328de6f1a698bb" exitCode=0
Nov 25 10:43:51 crc kubenswrapper[4769]: I1125 10:43:51.357564 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l28dx" event={"ID":"fe774271-9b3e-42b7-ae1d-fbb70cb6ec91","Type":"ContainerDied","Data":"5b990715ada23878c43bcbe5cbc733073dab9a655d7d5b2b8f328de6f1a698bb"}
Nov 25 10:43:51 crc kubenswrapper[4769]: I1125 10:43:51.418658 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-f76hc"
Nov 25 10:43:52 crc kubenswrapper[4769]: I1125 10:43:52.371766 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l28dx" event={"ID":"fe774271-9b3e-42b7-ae1d-fbb70cb6ec91","Type":"ContainerStarted","Data":"8517c9cc22558a52fbb8124f12d3592471ec959a7d470cf7c8a3bb69d3ca7065"}
Nov 25 10:43:52 crc kubenswrapper[4769]: I1125 10:43:52.400082 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-l28dx" podStartSLOduration=2.837382833 podStartE2EDuration="7.400061324s" podCreationTimestamp="2025-11-25 10:43:45 +0000 UTC" firstStartedPulling="2025-11-25 10:43:47.27754756 +0000 UTC m=+3575.862519873" lastFinishedPulling="2025-11-25 10:43:51.840226041 +0000 UTC m=+3580.425198364" observedRunningTime="2025-11-25 10:43:52.388308943 +0000 UTC m=+3580.973281256" watchObservedRunningTime="2025-11-25 10:43:52.400061324 +0000 UTC m=+3580.985033657"
Nov 25 10:43:54 crc kubenswrapper[4769]: I1125 10:43:54.259740 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-f76hc"]
Nov 25 10:43:54 crc kubenswrapper[4769]: I1125 10:43:54.260745 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-f76hc" podUID="94f049f7-9223-4ae6-adb7-d2ce70808532" containerName="registry-server" containerID="cri-o://ce65c7aa9d31d2aafb32b20589ae35a983bbd6a83b86e1f960ae83142f66b55d" gracePeriod=2
Nov 25 10:43:54 crc kubenswrapper[4769]: I1125 10:43:54.426251 4769 generic.go:334] "Generic (PLEG): container finished" podID="94f049f7-9223-4ae6-adb7-d2ce70808532" containerID="ce65c7aa9d31d2aafb32b20589ae35a983bbd6a83b86e1f960ae83142f66b55d" exitCode=0
Nov 25 10:43:54 crc kubenswrapper[4769]: I1125 10:43:54.426312 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f76hc" event={"ID":"94f049f7-9223-4ae6-adb7-d2ce70808532","Type":"ContainerDied","Data":"ce65c7aa9d31d2aafb32b20589ae35a983bbd6a83b86e1f960ae83142f66b55d"}
Nov 25 10:43:55 crc kubenswrapper[4769]: I1125 10:43:55.049061 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-f76hc"
Nov 25 10:43:55 crc kubenswrapper[4769]: I1125 10:43:55.115252 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94f049f7-9223-4ae6-adb7-d2ce70808532-catalog-content\") pod \"94f049f7-9223-4ae6-adb7-d2ce70808532\" (UID: \"94f049f7-9223-4ae6-adb7-d2ce70808532\") "
Nov 25 10:43:55 crc kubenswrapper[4769]: I1125 10:43:55.115415 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dmhb2\" (UniqueName: \"kubernetes.io/projected/94f049f7-9223-4ae6-adb7-d2ce70808532-kube-api-access-dmhb2\") pod \"94f049f7-9223-4ae6-adb7-d2ce70808532\" (UID: \"94f049f7-9223-4ae6-adb7-d2ce70808532\") "
Nov 25 10:43:55 crc kubenswrapper[4769]: I1125 10:43:55.115475 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94f049f7-9223-4ae6-adb7-d2ce70808532-utilities\") pod \"94f049f7-9223-4ae6-adb7-d2ce70808532\" (UID: \"94f049f7-9223-4ae6-adb7-d2ce70808532\") "
Nov 25 10:43:55 crc kubenswrapper[4769]: I1125 10:43:55.116313 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/94f049f7-9223-4ae6-adb7-d2ce70808532-utilities" (OuterVolumeSpecName: "utilities") pod "94f049f7-9223-4ae6-adb7-d2ce70808532" (UID: "94f049f7-9223-4ae6-adb7-d2ce70808532"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 10:43:55 crc kubenswrapper[4769]: I1125 10:43:55.116490 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94f049f7-9223-4ae6-adb7-d2ce70808532-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 10:43:55 crc kubenswrapper[4769]: I1125 10:43:55.121303 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/94f049f7-9223-4ae6-adb7-d2ce70808532-kube-api-access-dmhb2" (OuterVolumeSpecName: "kube-api-access-dmhb2") pod "94f049f7-9223-4ae6-adb7-d2ce70808532" (UID: "94f049f7-9223-4ae6-adb7-d2ce70808532"). InnerVolumeSpecName "kube-api-access-dmhb2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:43:55 crc kubenswrapper[4769]: I1125 10:43:55.177797 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/94f049f7-9223-4ae6-adb7-d2ce70808532-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "94f049f7-9223-4ae6-adb7-d2ce70808532" (UID: "94f049f7-9223-4ae6-adb7-d2ce70808532"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 10:43:55 crc kubenswrapper[4769]: I1125 10:43:55.218746 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94f049f7-9223-4ae6-adb7-d2ce70808532-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 10:43:55 crc kubenswrapper[4769]: I1125 10:43:55.218795 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dmhb2\" (UniqueName: \"kubernetes.io/projected/94f049f7-9223-4ae6-adb7-d2ce70808532-kube-api-access-dmhb2\") on node \"crc\" DevicePath \"\""
Nov 25 10:43:55 crc kubenswrapper[4769]: I1125 10:43:55.443824 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f76hc" event={"ID":"94f049f7-9223-4ae6-adb7-d2ce70808532","Type":"ContainerDied","Data":"92bc1a56def4a94b59e898639971724d515a96a638f4e5c8f53b989ee18b8b80"}
Nov 25 10:43:55 crc kubenswrapper[4769]: I1125 10:43:55.443914 4769 scope.go:117] "RemoveContainer" containerID="ce65c7aa9d31d2aafb32b20589ae35a983bbd6a83b86e1f960ae83142f66b55d"
Nov 25 10:43:55 crc kubenswrapper[4769]: I1125 10:43:55.443935 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-f76hc"
Nov 25 10:43:55 crc kubenswrapper[4769]: I1125 10:43:55.475308 4769 scope.go:117] "RemoveContainer" containerID="b8749b11a4c6666f7236638e66510f80fa717f8728851571f897983339662f26"
Nov 25 10:43:55 crc kubenswrapper[4769]: I1125 10:43:55.502354 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-f76hc"]
Nov 25 10:43:55 crc kubenswrapper[4769]: I1125 10:43:55.513739 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-f76hc"]
Nov 25 10:43:55 crc kubenswrapper[4769]: I1125 10:43:55.513937 4769 scope.go:117] "RemoveContainer" containerID="051eea45af87ed252cc8e02aba2679f978e40cdc847aeba15c79366df8cf69e8"
Nov 25 10:43:56 crc kubenswrapper[4769]: I1125 10:43:56.251779 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="94f049f7-9223-4ae6-adb7-d2ce70808532" path="/var/lib/kubelet/pods/94f049f7-9223-4ae6-adb7-d2ce70808532/volumes"
Nov 25 10:43:56 crc kubenswrapper[4769]: I1125 10:43:56.254198 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-l28dx"
Nov 25 10:43:56 crc kubenswrapper[4769]: I1125 10:43:56.254313 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-l28dx"
Nov 25 10:43:57 crc kubenswrapper[4769]: I1125 10:43:57.300226 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-l28dx" podUID="fe774271-9b3e-42b7-ae1d-fbb70cb6ec91" containerName="registry-server" probeResult="failure" output=<
Nov 25 10:43:57 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 10:43:57 crc kubenswrapper[4769]: >
podUID="94f049f7-9223-4ae6-adb7-d2ce70808532" containerName="extract-content" Nov 25 10:44:03 crc kubenswrapper[4769]: E1125 10:44:03.038637 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94f049f7-9223-4ae6-adb7-d2ce70808532" containerName="extract-utilities" Nov 25 10:44:03 crc kubenswrapper[4769]: I1125 10:44:03.038647 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="94f049f7-9223-4ae6-adb7-d2ce70808532" containerName="extract-utilities" Nov 25 10:44:03 crc kubenswrapper[4769]: E1125 10:44:03.038661 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94f049f7-9223-4ae6-adb7-d2ce70808532" containerName="registry-server" Nov 25 10:44:03 crc kubenswrapper[4769]: I1125 10:44:03.038667 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="94f049f7-9223-4ae6-adb7-d2ce70808532" containerName="registry-server" Nov 25 10:44:03 crc kubenswrapper[4769]: I1125 10:44:03.038900 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="94f049f7-9223-4ae6-adb7-d2ce70808532" containerName="registry-server" Nov 25 10:44:03 crc kubenswrapper[4769]: I1125 10:44:03.040655 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rlcqw" Nov 25 10:44:03 crc kubenswrapper[4769]: I1125 10:44:03.057994 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rlcqw"] Nov 25 10:44:03 crc kubenswrapper[4769]: I1125 10:44:03.148602 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mk9rv\" (UniqueName: \"kubernetes.io/projected/090ce897-cef4-4e61-80c9-1cc1c6dac92f-kube-api-access-mk9rv\") pod \"redhat-marketplace-rlcqw\" (UID: \"090ce897-cef4-4e61-80c9-1cc1c6dac92f\") " pod="openshift-marketplace/redhat-marketplace-rlcqw" Nov 25 10:44:03 crc kubenswrapper[4769]: I1125 10:44:03.148860 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/090ce897-cef4-4e61-80c9-1cc1c6dac92f-catalog-content\") pod \"redhat-marketplace-rlcqw\" (UID: \"090ce897-cef4-4e61-80c9-1cc1c6dac92f\") " pod="openshift-marketplace/redhat-marketplace-rlcqw" Nov 25 10:44:03 crc kubenswrapper[4769]: I1125 10:44:03.148927 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/090ce897-cef4-4e61-80c9-1cc1c6dac92f-utilities\") pod \"redhat-marketplace-rlcqw\" (UID: \"090ce897-cef4-4e61-80c9-1cc1c6dac92f\") " pod="openshift-marketplace/redhat-marketplace-rlcqw" Nov 25 10:44:03 crc kubenswrapper[4769]: I1125 10:44:03.251691 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/090ce897-cef4-4e61-80c9-1cc1c6dac92f-catalog-content\") pod \"redhat-marketplace-rlcqw\" (UID: \"090ce897-cef4-4e61-80c9-1cc1c6dac92f\") " pod="openshift-marketplace/redhat-marketplace-rlcqw" Nov 25 10:44:03 crc kubenswrapper[4769]: I1125 10:44:03.251835 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/090ce897-cef4-4e61-80c9-1cc1c6dac92f-utilities\") pod \"redhat-marketplace-rlcqw\" (UID: \"090ce897-cef4-4e61-80c9-1cc1c6dac92f\") " pod="openshift-marketplace/redhat-marketplace-rlcqw" Nov 25 10:44:03 crc kubenswrapper[4769]: I1125 10:44:03.251950 4769 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mk9rv\" (UniqueName: \"kubernetes.io/projected/090ce897-cef4-4e61-80c9-1cc1c6dac92f-kube-api-access-mk9rv\") pod \"redhat-marketplace-rlcqw\" (UID: \"090ce897-cef4-4e61-80c9-1cc1c6dac92f\") " pod="openshift-marketplace/redhat-marketplace-rlcqw" Nov 25 10:44:03 crc kubenswrapper[4769]: I1125 10:44:03.252099 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/090ce897-cef4-4e61-80c9-1cc1c6dac92f-catalog-content\") pod \"redhat-marketplace-rlcqw\" (UID: \"090ce897-cef4-4e61-80c9-1cc1c6dac92f\") " pod="openshift-marketplace/redhat-marketplace-rlcqw" Nov 25 10:44:03 crc kubenswrapper[4769]: I1125 10:44:03.252309 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/090ce897-cef4-4e61-80c9-1cc1c6dac92f-utilities\") pod \"redhat-marketplace-rlcqw\" (UID: \"090ce897-cef4-4e61-80c9-1cc1c6dac92f\") " pod="openshift-marketplace/redhat-marketplace-rlcqw" Nov 25 10:44:03 crc kubenswrapper[4769]: I1125 10:44:03.282362 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mk9rv\" (UniqueName: \"kubernetes.io/projected/090ce897-cef4-4e61-80c9-1cc1c6dac92f-kube-api-access-mk9rv\") pod \"redhat-marketplace-rlcqw\" (UID: \"090ce897-cef4-4e61-80c9-1cc1c6dac92f\") " pod="openshift-marketplace/redhat-marketplace-rlcqw" Nov 25 10:44:03 crc kubenswrapper[4769]: I1125 10:44:03.374948 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rlcqw" Nov 25 10:44:03 crc kubenswrapper[4769]: I1125 10:44:03.909106 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rlcqw"] Nov 25 10:44:04 crc kubenswrapper[4769]: I1125 10:44:04.555656 4769 generic.go:334] "Generic (PLEG): container finished" podID="090ce897-cef4-4e61-80c9-1cc1c6dac92f" containerID="0875c12095626685cd0dcd6d6326e59ad36de7c52fceb090ee50fc51408f090a" exitCode=0 Nov 25 10:44:04 crc kubenswrapper[4769]: I1125 10:44:04.555912 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rlcqw" event={"ID":"090ce897-cef4-4e61-80c9-1cc1c6dac92f","Type":"ContainerDied","Data":"0875c12095626685cd0dcd6d6326e59ad36de7c52fceb090ee50fc51408f090a"} Nov 25 10:44:04 crc kubenswrapper[4769]: I1125 10:44:04.555938 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rlcqw" event={"ID":"090ce897-cef4-4e61-80c9-1cc1c6dac92f","Type":"ContainerStarted","Data":"cfe33484693f6f9a20d54855923f74f68ec50bb0ca2b3c84e7aefc47bb23d959"} Nov 25 10:44:05 crc kubenswrapper[4769]: I1125 10:44:05.570265 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rlcqw" event={"ID":"090ce897-cef4-4e61-80c9-1cc1c6dac92f","Type":"ContainerStarted","Data":"cf7efa2b0736105cb964bf8c919d8cdf6972a136efbd358c06ad72145a2758ed"} Nov 25 10:44:06 crc kubenswrapper[4769]: I1125 10:44:06.315761 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-l28dx" Nov 25 10:44:06 crc kubenswrapper[4769]: I1125 10:44:06.366993 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-l28dx" Nov 25 10:44:06 crc kubenswrapper[4769]: I1125 10:44:06.581714 4769 generic.go:334] "Generic 
Nov 25 10:44:06 crc kubenswrapper[4769]: I1125 10:44:06.581714 4769 generic.go:334] "Generic (PLEG): container finished" podID="090ce897-cef4-4e61-80c9-1cc1c6dac92f" containerID="cf7efa2b0736105cb964bf8c919d8cdf6972a136efbd358c06ad72145a2758ed" exitCode=0
Nov 25 10:44:06 crc kubenswrapper[4769]: I1125 10:44:06.581794 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rlcqw" event={"ID":"090ce897-cef4-4e61-80c9-1cc1c6dac92f","Type":"ContainerDied","Data":"cf7efa2b0736105cb964bf8c919d8cdf6972a136efbd358c06ad72145a2758ed"}
Nov 25 10:44:07 crc kubenswrapper[4769]: I1125 10:44:07.595504 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rlcqw" event={"ID":"090ce897-cef4-4e61-80c9-1cc1c6dac92f","Type":"ContainerStarted","Data":"a7b2260f629fdb8e24b0bbd65cd514cf4232adade3b7150247cc7e795da3175d"}
Nov 25 10:44:07 crc kubenswrapper[4769]: I1125 10:44:07.614271 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-rlcqw" podStartSLOduration=2.155098571 podStartE2EDuration="4.614251303s" podCreationTimestamp="2025-11-25 10:44:03 +0000 UTC" firstStartedPulling="2025-11-25 10:44:04.557688824 +0000 UTC m=+3593.142661137" lastFinishedPulling="2025-11-25 10:44:07.016841546 +0000 UTC m=+3595.601813869" observedRunningTime="2025-11-25 10:44:07.612348602 +0000 UTC m=+3596.197320925" watchObservedRunningTime="2025-11-25 10:44:07.614251303 +0000 UTC m=+3596.199223626"
Nov 25 10:44:08 crc kubenswrapper[4769]: I1125 10:44:08.612752 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-l28dx"]
Nov 25 10:44:08 crc kubenswrapper[4769]: I1125 10:44:08.614283 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-l28dx" podUID="fe774271-9b3e-42b7-ae1d-fbb70cb6ec91" containerName="registry-server" containerID="cri-o://8517c9cc22558a52fbb8124f12d3592471ec959a7d470cf7c8a3bb69d3ca7065" gracePeriod=2
Nov 25 10:44:09 crc kubenswrapper[4769]: I1125 10:44:09.170631 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-l28dx"
Nov 25 10:44:09 crc kubenswrapper[4769]: I1125 10:44:09.303820 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe774271-9b3e-42b7-ae1d-fbb70cb6ec91-catalog-content\") pod \"fe774271-9b3e-42b7-ae1d-fbb70cb6ec91\" (UID: \"fe774271-9b3e-42b7-ae1d-fbb70cb6ec91\") "
Nov 25 10:44:09 crc kubenswrapper[4769]: I1125 10:44:09.303926 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-psldb\" (UniqueName: \"kubernetes.io/projected/fe774271-9b3e-42b7-ae1d-fbb70cb6ec91-kube-api-access-psldb\") pod \"fe774271-9b3e-42b7-ae1d-fbb70cb6ec91\" (UID: \"fe774271-9b3e-42b7-ae1d-fbb70cb6ec91\") "
Nov 25 10:44:09 crc kubenswrapper[4769]: I1125 10:44:09.305655 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe774271-9b3e-42b7-ae1d-fbb70cb6ec91-utilities\") pod \"fe774271-9b3e-42b7-ae1d-fbb70cb6ec91\" (UID: \"fe774271-9b3e-42b7-ae1d-fbb70cb6ec91\") "
Nov 25 10:44:09 crc kubenswrapper[4769]: I1125 10:44:09.306418 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fe774271-9b3e-42b7-ae1d-fbb70cb6ec91-utilities" (OuterVolumeSpecName: "utilities") pod "fe774271-9b3e-42b7-ae1d-fbb70cb6ec91" (UID: "fe774271-9b3e-42b7-ae1d-fbb70cb6ec91"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 10:44:09 crc kubenswrapper[4769]: I1125 10:44:09.309164 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe774271-9b3e-42b7-ae1d-fbb70cb6ec91-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 10:44:09 crc kubenswrapper[4769]: I1125 10:44:09.334306 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fe774271-9b3e-42b7-ae1d-fbb70cb6ec91-kube-api-access-psldb" (OuterVolumeSpecName: "kube-api-access-psldb") pod "fe774271-9b3e-42b7-ae1d-fbb70cb6ec91" (UID: "fe774271-9b3e-42b7-ae1d-fbb70cb6ec91"). InnerVolumeSpecName "kube-api-access-psldb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:44:09 crc kubenswrapper[4769]: I1125 10:44:09.407493 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fe774271-9b3e-42b7-ae1d-fbb70cb6ec91-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fe774271-9b3e-42b7-ae1d-fbb70cb6ec91" (UID: "fe774271-9b3e-42b7-ae1d-fbb70cb6ec91"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 10:44:09 crc kubenswrapper[4769]: I1125 10:44:09.411422 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe774271-9b3e-42b7-ae1d-fbb70cb6ec91-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 10:44:09 crc kubenswrapper[4769]: I1125 10:44:09.411456 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-psldb\" (UniqueName: \"kubernetes.io/projected/fe774271-9b3e-42b7-ae1d-fbb70cb6ec91-kube-api-access-psldb\") on node \"crc\" DevicePath \"\""
Nov 25 10:44:09 crc kubenswrapper[4769]: I1125 10:44:09.619712 4769 generic.go:334] "Generic (PLEG): container finished" podID="fe774271-9b3e-42b7-ae1d-fbb70cb6ec91" containerID="8517c9cc22558a52fbb8124f12d3592471ec959a7d470cf7c8a3bb69d3ca7065" exitCode=0
Nov 25 10:44:09 crc kubenswrapper[4769]: I1125 10:44:09.619750 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l28dx" event={"ID":"fe774271-9b3e-42b7-ae1d-fbb70cb6ec91","Type":"ContainerDied","Data":"8517c9cc22558a52fbb8124f12d3592471ec959a7d470cf7c8a3bb69d3ca7065"}
Nov 25 10:44:09 crc kubenswrapper[4769]: I1125 10:44:09.619776 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l28dx" event={"ID":"fe774271-9b3e-42b7-ae1d-fbb70cb6ec91","Type":"ContainerDied","Data":"5988cda84e8336d09ad95010be91fdd759de67e036060b0372d7fb0f1d70aedd"}
Nov 25 10:44:09 crc kubenswrapper[4769]: I1125 10:44:09.619799 4769 scope.go:117] "RemoveContainer" containerID="8517c9cc22558a52fbb8124f12d3592471ec959a7d470cf7c8a3bb69d3ca7065"
Nov 25 10:44:09 crc kubenswrapper[4769]: I1125 10:44:09.619812 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-l28dx"
Nov 25 10:44:09 crc kubenswrapper[4769]: I1125 10:44:09.657093 4769 scope.go:117] "RemoveContainer" containerID="5b990715ada23878c43bcbe5cbc733073dab9a655d7d5b2b8f328de6f1a698bb"
Nov 25 10:44:09 crc kubenswrapper[4769]: I1125 10:44:09.676342 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-l28dx"]
Nov 25 10:44:09 crc kubenswrapper[4769]: I1125 10:44:09.684554 4769 scope.go:117] "RemoveContainer" containerID="e75417a1d2dbd9077d526124cd6b104e98c2438812872f28b60f4c2ef935d9f2"
Nov 25 10:44:09 crc kubenswrapper[4769]: I1125 10:44:09.688770 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-l28dx"]
Nov 25 10:44:09 crc kubenswrapper[4769]: I1125 10:44:09.762530 4769 scope.go:117] "RemoveContainer" containerID="8517c9cc22558a52fbb8124f12d3592471ec959a7d470cf7c8a3bb69d3ca7065"
Nov 25 10:44:09 crc kubenswrapper[4769]: E1125 10:44:09.762956 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8517c9cc22558a52fbb8124f12d3592471ec959a7d470cf7c8a3bb69d3ca7065\": container with ID starting with 8517c9cc22558a52fbb8124f12d3592471ec959a7d470cf7c8a3bb69d3ca7065 not found: ID does not exist" containerID="8517c9cc22558a52fbb8124f12d3592471ec959a7d470cf7c8a3bb69d3ca7065"
Nov 25 10:44:09 crc kubenswrapper[4769]: I1125 10:44:09.763030 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8517c9cc22558a52fbb8124f12d3592471ec959a7d470cf7c8a3bb69d3ca7065"} err="failed to get container status \"8517c9cc22558a52fbb8124f12d3592471ec959a7d470cf7c8a3bb69d3ca7065\": rpc error: code = NotFound desc = could not find container \"8517c9cc22558a52fbb8124f12d3592471ec959a7d470cf7c8a3bb69d3ca7065\": container with ID starting with 8517c9cc22558a52fbb8124f12d3592471ec959a7d470cf7c8a3bb69d3ca7065 not found: ID does not exist"
Nov 25 10:44:09 crc kubenswrapper[4769]: I1125 10:44:09.763058 4769 scope.go:117] "RemoveContainer" containerID="5b990715ada23878c43bcbe5cbc733073dab9a655d7d5b2b8f328de6f1a698bb"
Nov 25 10:44:09 crc kubenswrapper[4769]: E1125 10:44:09.763421 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5b990715ada23878c43bcbe5cbc733073dab9a655d7d5b2b8f328de6f1a698bb\": container with ID starting with 5b990715ada23878c43bcbe5cbc733073dab9a655d7d5b2b8f328de6f1a698bb not found: ID does not exist" containerID="5b990715ada23878c43bcbe5cbc733073dab9a655d7d5b2b8f328de6f1a698bb"
Nov 25 10:44:09 crc kubenswrapper[4769]: I1125 10:44:09.763455 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5b990715ada23878c43bcbe5cbc733073dab9a655d7d5b2b8f328de6f1a698bb"} err="failed to get container status \"5b990715ada23878c43bcbe5cbc733073dab9a655d7d5b2b8f328de6f1a698bb\": rpc error: code = NotFound desc = could not find container \"5b990715ada23878c43bcbe5cbc733073dab9a655d7d5b2b8f328de6f1a698bb\": container with ID starting with 5b990715ada23878c43bcbe5cbc733073dab9a655d7d5b2b8f328de6f1a698bb not found: ID does not exist"
Nov 25 10:44:09 crc kubenswrapper[4769]: I1125 10:44:09.763476 4769 scope.go:117] "RemoveContainer" containerID="e75417a1d2dbd9077d526124cd6b104e98c2438812872f28b60f4c2ef935d9f2"
Nov 25 10:44:09 crc kubenswrapper[4769]: E1125 10:44:09.763769 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e75417a1d2dbd9077d526124cd6b104e98c2438812872f28b60f4c2ef935d9f2\": container with ID starting with e75417a1d2dbd9077d526124cd6b104e98c2438812872f28b60f4c2ef935d9f2 not found: ID does not exist" containerID="e75417a1d2dbd9077d526124cd6b104e98c2438812872f28b60f4c2ef935d9f2"
Nov 25 10:44:09 crc kubenswrapper[4769]: I1125 10:44:09.763843 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e75417a1d2dbd9077d526124cd6b104e98c2438812872f28b60f4c2ef935d9f2"} err="failed to get container status \"e75417a1d2dbd9077d526124cd6b104e98c2438812872f28b60f4c2ef935d9f2\": rpc error: code = NotFound desc = could not find container \"e75417a1d2dbd9077d526124cd6b104e98c2438812872f28b60f4c2ef935d9f2\": container with ID starting with e75417a1d2dbd9077d526124cd6b104e98c2438812872f28b60f4c2ef935d9f2 not found: ID does not exist"
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rlcqw" Nov 25 10:44:16 crc kubenswrapper[4769]: I1125 10:44:16.406758 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mk9rv\" (UniqueName: \"kubernetes.io/projected/090ce897-cef4-4e61-80c9-1cc1c6dac92f-kube-api-access-mk9rv\") pod \"090ce897-cef4-4e61-80c9-1cc1c6dac92f\" (UID: \"090ce897-cef4-4e61-80c9-1cc1c6dac92f\") " Nov 25 10:44:16 crc kubenswrapper[4769]: I1125 10:44:16.407169 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/090ce897-cef4-4e61-80c9-1cc1c6dac92f-catalog-content\") pod \"090ce897-cef4-4e61-80c9-1cc1c6dac92f\" (UID: \"090ce897-cef4-4e61-80c9-1cc1c6dac92f\") " Nov 25 10:44:16 crc kubenswrapper[4769]: I1125 10:44:16.407225 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/090ce897-cef4-4e61-80c9-1cc1c6dac92f-utilities\") pod \"090ce897-cef4-4e61-80c9-1cc1c6dac92f\" (UID: \"090ce897-cef4-4e61-80c9-1cc1c6dac92f\") " Nov 25 10:44:16 crc kubenswrapper[4769]: I1125 10:44:16.408094 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/090ce897-cef4-4e61-80c9-1cc1c6dac92f-utilities" (OuterVolumeSpecName: "utilities") pod "090ce897-cef4-4e61-80c9-1cc1c6dac92f" (UID: "090ce897-cef4-4e61-80c9-1cc1c6dac92f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:44:16 crc kubenswrapper[4769]: I1125 10:44:16.414035 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/090ce897-cef4-4e61-80c9-1cc1c6dac92f-kube-api-access-mk9rv" (OuterVolumeSpecName: "kube-api-access-mk9rv") pod "090ce897-cef4-4e61-80c9-1cc1c6dac92f" (UID: "090ce897-cef4-4e61-80c9-1cc1c6dac92f"). InnerVolumeSpecName "kube-api-access-mk9rv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:44:16 crc kubenswrapper[4769]: I1125 10:44:16.424958 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/090ce897-cef4-4e61-80c9-1cc1c6dac92f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "090ce897-cef4-4e61-80c9-1cc1c6dac92f" (UID: "090ce897-cef4-4e61-80c9-1cc1c6dac92f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:44:16 crc kubenswrapper[4769]: I1125 10:44:16.509853 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/090ce897-cef4-4e61-80c9-1cc1c6dac92f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:44:16 crc kubenswrapper[4769]: I1125 10:44:16.509886 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/090ce897-cef4-4e61-80c9-1cc1c6dac92f-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:44:16 crc kubenswrapper[4769]: I1125 10:44:16.509895 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mk9rv\" (UniqueName: \"kubernetes.io/projected/090ce897-cef4-4e61-80c9-1cc1c6dac92f-kube-api-access-mk9rv\") on node \"crc\" DevicePath \"\"" Nov 25 10:44:16 crc kubenswrapper[4769]: I1125 10:44:16.722191 4769 generic.go:334] "Generic (PLEG): container finished" podID="090ce897-cef4-4e61-80c9-1cc1c6dac92f" containerID="a7b2260f629fdb8e24b0bbd65cd514cf4232adade3b7150247cc7e795da3175d" exitCode=0 Nov 25 10:44:16 crc kubenswrapper[4769]: I1125 10:44:16.722229 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rlcqw" event={"ID":"090ce897-cef4-4e61-80c9-1cc1c6dac92f","Type":"ContainerDied","Data":"a7b2260f629fdb8e24b0bbd65cd514cf4232adade3b7150247cc7e795da3175d"} Nov 25 10:44:16 crc kubenswrapper[4769]: I1125 10:44:16.722271 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rlcqw" event={"ID":"090ce897-cef4-4e61-80c9-1cc1c6dac92f","Type":"ContainerDied","Data":"cfe33484693f6f9a20d54855923f74f68ec50bb0ca2b3c84e7aefc47bb23d959"} Nov 25 10:44:16 crc kubenswrapper[4769]: I1125 10:44:16.722297 4769 scope.go:117] "RemoveContainer" containerID="a7b2260f629fdb8e24b0bbd65cd514cf4232adade3b7150247cc7e795da3175d" Nov 25 10:44:16 crc kubenswrapper[4769]: I1125 10:44:16.722373 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rlcqw" Nov 25 10:44:16 crc kubenswrapper[4769]: I1125 10:44:16.753198 4769 scope.go:117] "RemoveContainer" containerID="cf7efa2b0736105cb964bf8c919d8cdf6972a136efbd358c06ad72145a2758ed" Nov 25 10:44:16 crc kubenswrapper[4769]: I1125 10:44:16.791922 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rlcqw"] Nov 25 10:44:16 crc kubenswrapper[4769]: I1125 10:44:16.801110 4769 scope.go:117] "RemoveContainer" containerID="0875c12095626685cd0dcd6d6326e59ad36de7c52fceb090ee50fc51408f090a" Nov 25 10:44:16 crc kubenswrapper[4769]: I1125 10:44:16.810508 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-rlcqw"] Nov 25 10:44:16 crc kubenswrapper[4769]: I1125 10:44:16.855947 4769 scope.go:117] "RemoveContainer" containerID="a7b2260f629fdb8e24b0bbd65cd514cf4232adade3b7150247cc7e795da3175d" Nov 25 10:44:16 crc kubenswrapper[4769]: E1125 10:44:16.856531 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a7b2260f629fdb8e24b0bbd65cd514cf4232adade3b7150247cc7e795da3175d\": container with ID starting with a7b2260f629fdb8e24b0bbd65cd514cf4232adade3b7150247cc7e795da3175d not found: ID does not exist" containerID="a7b2260f629fdb8e24b0bbd65cd514cf4232adade3b7150247cc7e795da3175d" Nov 25 10:44:16 crc kubenswrapper[4769]: I1125 10:44:16.856673 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7b2260f629fdb8e24b0bbd65cd514cf4232adade3b7150247cc7e795da3175d"} err="failed to get container status \"a7b2260f629fdb8e24b0bbd65cd514cf4232adade3b7150247cc7e795da3175d\": rpc error: code = NotFound desc = could not find container \"a7b2260f629fdb8e24b0bbd65cd514cf4232adade3b7150247cc7e795da3175d\": container with ID starting with a7b2260f629fdb8e24b0bbd65cd514cf4232adade3b7150247cc7e795da3175d not found: ID does not exist" Nov 25 10:44:16 crc kubenswrapper[4769]: I1125 10:44:16.856768 4769 scope.go:117] "RemoveContainer" containerID="cf7efa2b0736105cb964bf8c919d8cdf6972a136efbd358c06ad72145a2758ed" Nov 25 10:44:16 crc kubenswrapper[4769]: E1125 10:44:16.857204 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cf7efa2b0736105cb964bf8c919d8cdf6972a136efbd358c06ad72145a2758ed\": container with ID starting with cf7efa2b0736105cb964bf8c919d8cdf6972a136efbd358c06ad72145a2758ed not found: ID does not exist" containerID="cf7efa2b0736105cb964bf8c919d8cdf6972a136efbd358c06ad72145a2758ed" Nov 25 10:44:16 crc kubenswrapper[4769]: I1125 10:44:16.857286 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cf7efa2b0736105cb964bf8c919d8cdf6972a136efbd358c06ad72145a2758ed"} err="failed to get container status \"cf7efa2b0736105cb964bf8c919d8cdf6972a136efbd358c06ad72145a2758ed\": rpc error: code = NotFound desc = could not find container \"cf7efa2b0736105cb964bf8c919d8cdf6972a136efbd358c06ad72145a2758ed\": container with ID starting with cf7efa2b0736105cb964bf8c919d8cdf6972a136efbd358c06ad72145a2758ed not found: ID does not exist" Nov 25 10:44:16 crc kubenswrapper[4769]: I1125 10:44:16.857306 4769 scope.go:117] "RemoveContainer" containerID="0875c12095626685cd0dcd6d6326e59ad36de7c52fceb090ee50fc51408f090a" Nov 25 10:44:16 crc kubenswrapper[4769]: E1125 10:44:16.857792 4769 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"0875c12095626685cd0dcd6d6326e59ad36de7c52fceb090ee50fc51408f090a\": container with ID starting with 0875c12095626685cd0dcd6d6326e59ad36de7c52fceb090ee50fc51408f090a not found: ID does not exist" containerID="0875c12095626685cd0dcd6d6326e59ad36de7c52fceb090ee50fc51408f090a" Nov 25 10:44:16 crc kubenswrapper[4769]: I1125 10:44:16.857842 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0875c12095626685cd0dcd6d6326e59ad36de7c52fceb090ee50fc51408f090a"} err="failed to get container status \"0875c12095626685cd0dcd6d6326e59ad36de7c52fceb090ee50fc51408f090a\": rpc error: code = NotFound desc = could not find container \"0875c12095626685cd0dcd6d6326e59ad36de7c52fceb090ee50fc51408f090a\": container with ID starting with 0875c12095626685cd0dcd6d6326e59ad36de7c52fceb090ee50fc51408f090a not found: ID does not exist" Nov 25 10:44:18 crc kubenswrapper[4769]: I1125 10:44:18.257338 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="090ce897-cef4-4e61-80c9-1cc1c6dac92f" path="/var/lib/kubelet/pods/090ce897-cef4-4e61-80c9-1cc1c6dac92f/volumes" Nov 25 10:44:52 crc kubenswrapper[4769]: I1125 10:44:52.290919 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:44:52 crc kubenswrapper[4769]: I1125 10:44:52.291442 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:44:58 crc kubenswrapper[4769]: E1125 10:44:58.936123 4769 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.201:33764->38.102.83.201:43143: write tcp 38.102.83.201:33764->38.102.83.201:43143: write: broken pipe Nov 25 10:45:00 crc kubenswrapper[4769]: I1125 10:45:00.192499 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401125-5h9tq"] Nov 25 10:45:00 crc kubenswrapper[4769]: E1125 10:45:00.194577 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe774271-9b3e-42b7-ae1d-fbb70cb6ec91" containerName="extract-utilities" Nov 25 10:45:00 crc kubenswrapper[4769]: I1125 10:45:00.194728 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe774271-9b3e-42b7-ae1d-fbb70cb6ec91" containerName="extract-utilities" Nov 25 10:45:00 crc kubenswrapper[4769]: E1125 10:45:00.194828 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe774271-9b3e-42b7-ae1d-fbb70cb6ec91" containerName="registry-server" Nov 25 10:45:00 crc kubenswrapper[4769]: I1125 10:45:00.194911 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe774271-9b3e-42b7-ae1d-fbb70cb6ec91" containerName="registry-server" Nov 25 10:45:00 crc kubenswrapper[4769]: E1125 10:45:00.195038 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="090ce897-cef4-4e61-80c9-1cc1c6dac92f" containerName="extract-content" Nov 25 10:45:00 crc kubenswrapper[4769]: I1125 10:45:00.195125 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="090ce897-cef4-4e61-80c9-1cc1c6dac92f" 
containerName="extract-content" Nov 25 10:45:00 crc kubenswrapper[4769]: E1125 10:45:00.195244 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="090ce897-cef4-4e61-80c9-1cc1c6dac92f" containerName="registry-server" Nov 25 10:45:00 crc kubenswrapper[4769]: I1125 10:45:00.195331 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="090ce897-cef4-4e61-80c9-1cc1c6dac92f" containerName="registry-server" Nov 25 10:45:00 crc kubenswrapper[4769]: E1125 10:45:00.195422 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="090ce897-cef4-4e61-80c9-1cc1c6dac92f" containerName="extract-utilities" Nov 25 10:45:00 crc kubenswrapper[4769]: I1125 10:45:00.195494 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="090ce897-cef4-4e61-80c9-1cc1c6dac92f" containerName="extract-utilities" Nov 25 10:45:00 crc kubenswrapper[4769]: E1125 10:45:00.195596 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe774271-9b3e-42b7-ae1d-fbb70cb6ec91" containerName="extract-content" Nov 25 10:45:00 crc kubenswrapper[4769]: I1125 10:45:00.195683 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe774271-9b3e-42b7-ae1d-fbb70cb6ec91" containerName="extract-content" Nov 25 10:45:00 crc kubenswrapper[4769]: I1125 10:45:00.196060 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="090ce897-cef4-4e61-80c9-1cc1c6dac92f" containerName="registry-server" Nov 25 10:45:00 crc kubenswrapper[4769]: I1125 10:45:00.196199 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="fe774271-9b3e-42b7-ae1d-fbb70cb6ec91" containerName="registry-server" Nov 25 10:45:00 crc kubenswrapper[4769]: I1125 10:45:00.199457 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-5h9tq" Nov 25 10:45:00 crc kubenswrapper[4769]: I1125 10:45:00.200431 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401125-5h9tq"] Nov 25 10:45:00 crc kubenswrapper[4769]: I1125 10:45:00.214282 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 10:45:00 crc kubenswrapper[4769]: I1125 10:45:00.214844 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 10:45:00 crc kubenswrapper[4769]: I1125 10:45:00.275206 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9scp6\" (UniqueName: \"kubernetes.io/projected/56bacf00-8c64-40d7-a1c5-48e833c79140-kube-api-access-9scp6\") pod \"collect-profiles-29401125-5h9tq\" (UID: \"56bacf00-8c64-40d7-a1c5-48e833c79140\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-5h9tq" Nov 25 10:45:00 crc kubenswrapper[4769]: I1125 10:45:00.275360 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/56bacf00-8c64-40d7-a1c5-48e833c79140-secret-volume\") pod \"collect-profiles-29401125-5h9tq\" (UID: \"56bacf00-8c64-40d7-a1c5-48e833c79140\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-5h9tq" Nov 25 10:45:00 crc kubenswrapper[4769]: I1125 10:45:00.275470 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: 
\"kubernetes.io/configmap/56bacf00-8c64-40d7-a1c5-48e833c79140-config-volume\") pod \"collect-profiles-29401125-5h9tq\" (UID: \"56bacf00-8c64-40d7-a1c5-48e833c79140\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-5h9tq" Nov 25 10:45:00 crc kubenswrapper[4769]: I1125 10:45:00.378325 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/56bacf00-8c64-40d7-a1c5-48e833c79140-config-volume\") pod \"collect-profiles-29401125-5h9tq\" (UID: \"56bacf00-8c64-40d7-a1c5-48e833c79140\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-5h9tq" Nov 25 10:45:00 crc kubenswrapper[4769]: I1125 10:45:00.378430 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9scp6\" (UniqueName: \"kubernetes.io/projected/56bacf00-8c64-40d7-a1c5-48e833c79140-kube-api-access-9scp6\") pod \"collect-profiles-29401125-5h9tq\" (UID: \"56bacf00-8c64-40d7-a1c5-48e833c79140\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-5h9tq" Nov 25 10:45:00 crc kubenswrapper[4769]: I1125 10:45:00.378591 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/56bacf00-8c64-40d7-a1c5-48e833c79140-secret-volume\") pod \"collect-profiles-29401125-5h9tq\" (UID: \"56bacf00-8c64-40d7-a1c5-48e833c79140\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-5h9tq" Nov 25 10:45:00 crc kubenswrapper[4769]: I1125 10:45:00.379393 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/56bacf00-8c64-40d7-a1c5-48e833c79140-config-volume\") pod \"collect-profiles-29401125-5h9tq\" (UID: \"56bacf00-8c64-40d7-a1c5-48e833c79140\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-5h9tq" Nov 25 10:45:00 crc kubenswrapper[4769]: I1125 10:45:00.384472 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/56bacf00-8c64-40d7-a1c5-48e833c79140-secret-volume\") pod \"collect-profiles-29401125-5h9tq\" (UID: \"56bacf00-8c64-40d7-a1c5-48e833c79140\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-5h9tq" Nov 25 10:45:00 crc kubenswrapper[4769]: I1125 10:45:00.399314 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9scp6\" (UniqueName: \"kubernetes.io/projected/56bacf00-8c64-40d7-a1c5-48e833c79140-kube-api-access-9scp6\") pod \"collect-profiles-29401125-5h9tq\" (UID: \"56bacf00-8c64-40d7-a1c5-48e833c79140\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-5h9tq" Nov 25 10:45:00 crc kubenswrapper[4769]: I1125 10:45:00.535314 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-5h9tq" Nov 25 10:45:01 crc kubenswrapper[4769]: W1125 10:45:01.027785 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod56bacf00_8c64_40d7_a1c5_48e833c79140.slice/crio-aa59a555488088cacc250a2733ddf888468ae866eef319cc1b4fdf8a379ff90a WatchSource:0}: Error finding container aa59a555488088cacc250a2733ddf888468ae866eef319cc1b4fdf8a379ff90a: Status 404 returned error can't find the container with id aa59a555488088cacc250a2733ddf888468ae866eef319cc1b4fdf8a379ff90a Nov 25 10:45:01 crc kubenswrapper[4769]: I1125 10:45:01.036419 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401125-5h9tq"] Nov 25 10:45:01 crc kubenswrapper[4769]: I1125 10:45:01.437496 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-5h9tq" event={"ID":"56bacf00-8c64-40d7-a1c5-48e833c79140","Type":"ContainerStarted","Data":"8a3545659e8e201990c775e04639a9b1eab6beb83d1249508619da7fbf088b8e"} Nov 25 10:45:01 crc kubenswrapper[4769]: I1125 10:45:01.437943 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-5h9tq" event={"ID":"56bacf00-8c64-40d7-a1c5-48e833c79140","Type":"ContainerStarted","Data":"aa59a555488088cacc250a2733ddf888468ae866eef319cc1b4fdf8a379ff90a"} Nov 25 10:45:01 crc kubenswrapper[4769]: I1125 10:45:01.461610 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-5h9tq" podStartSLOduration=1.461596598 podStartE2EDuration="1.461596598s" podCreationTimestamp="2025-11-25 10:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:45:01.45676967 +0000 UTC m=+3650.041741993" watchObservedRunningTime="2025-11-25 10:45:01.461596598 +0000 UTC m=+3650.046568901" Nov 25 10:45:02 crc kubenswrapper[4769]: I1125 10:45:02.451269 4769 generic.go:334] "Generic (PLEG): container finished" podID="56bacf00-8c64-40d7-a1c5-48e833c79140" containerID="8a3545659e8e201990c775e04639a9b1eab6beb83d1249508619da7fbf088b8e" exitCode=0 Nov 25 10:45:02 crc kubenswrapper[4769]: I1125 10:45:02.451318 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-5h9tq" event={"ID":"56bacf00-8c64-40d7-a1c5-48e833c79140","Type":"ContainerDied","Data":"8a3545659e8e201990c775e04639a9b1eab6beb83d1249508619da7fbf088b8e"} Nov 25 10:45:03 crc kubenswrapper[4769]: I1125 10:45:03.890140 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-5h9tq" Nov 25 10:45:03 crc kubenswrapper[4769]: I1125 10:45:03.962365 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9scp6\" (UniqueName: \"kubernetes.io/projected/56bacf00-8c64-40d7-a1c5-48e833c79140-kube-api-access-9scp6\") pod \"56bacf00-8c64-40d7-a1c5-48e833c79140\" (UID: \"56bacf00-8c64-40d7-a1c5-48e833c79140\") " Nov 25 10:45:03 crc kubenswrapper[4769]: I1125 10:45:03.962415 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/56bacf00-8c64-40d7-a1c5-48e833c79140-secret-volume\") pod \"56bacf00-8c64-40d7-a1c5-48e833c79140\" (UID: \"56bacf00-8c64-40d7-a1c5-48e833c79140\") " Nov 25 10:45:03 crc kubenswrapper[4769]: I1125 10:45:03.962438 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/56bacf00-8c64-40d7-a1c5-48e833c79140-config-volume\") pod \"56bacf00-8c64-40d7-a1c5-48e833c79140\" (UID: \"56bacf00-8c64-40d7-a1c5-48e833c79140\") " Nov 25 10:45:03 crc kubenswrapper[4769]: I1125 10:45:03.963913 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/56bacf00-8c64-40d7-a1c5-48e833c79140-config-volume" (OuterVolumeSpecName: "config-volume") pod "56bacf00-8c64-40d7-a1c5-48e833c79140" (UID: "56bacf00-8c64-40d7-a1c5-48e833c79140"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:45:03 crc kubenswrapper[4769]: I1125 10:45:03.970810 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56bacf00-8c64-40d7-a1c5-48e833c79140-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "56bacf00-8c64-40d7-a1c5-48e833c79140" (UID: "56bacf00-8c64-40d7-a1c5-48e833c79140"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:45:03 crc kubenswrapper[4769]: I1125 10:45:03.971009 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/56bacf00-8c64-40d7-a1c5-48e833c79140-kube-api-access-9scp6" (OuterVolumeSpecName: "kube-api-access-9scp6") pod "56bacf00-8c64-40d7-a1c5-48e833c79140" (UID: "56bacf00-8c64-40d7-a1c5-48e833c79140"). InnerVolumeSpecName "kube-api-access-9scp6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:45:04 crc kubenswrapper[4769]: I1125 10:45:04.065903 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9scp6\" (UniqueName: \"kubernetes.io/projected/56bacf00-8c64-40d7-a1c5-48e833c79140-kube-api-access-9scp6\") on node \"crc\" DevicePath \"\"" Nov 25 10:45:04 crc kubenswrapper[4769]: I1125 10:45:04.065937 4769 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/56bacf00-8c64-40d7-a1c5-48e833c79140-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 10:45:04 crc kubenswrapper[4769]: I1125 10:45:04.065947 4769 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/56bacf00-8c64-40d7-a1c5-48e833c79140-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 10:45:04 crc kubenswrapper[4769]: I1125 10:45:04.481717 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-5h9tq" event={"ID":"56bacf00-8c64-40d7-a1c5-48e833c79140","Type":"ContainerDied","Data":"aa59a555488088cacc250a2733ddf888468ae866eef319cc1b4fdf8a379ff90a"} Nov 25 10:45:04 crc kubenswrapper[4769]: I1125 10:45:04.481775 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="aa59a555488088cacc250a2733ddf888468ae866eef319cc1b4fdf8a379ff90a" Nov 25 10:45:04 crc kubenswrapper[4769]: I1125 10:45:04.481818 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-5h9tq" Nov 25 10:45:04 crc kubenswrapper[4769]: I1125 10:45:04.542973 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401080-9ntlv"] Nov 25 10:45:04 crc kubenswrapper[4769]: I1125 10:45:04.553782 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401080-9ntlv"] Nov 25 10:45:06 crc kubenswrapper[4769]: I1125 10:45:06.255535 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ebbb140-bc0e-47b2-9d3e-b8db0f609729" path="/var/lib/kubelet/pods/3ebbb140-bc0e-47b2-9d3e-b8db0f609729/volumes" Nov 25 10:45:22 crc kubenswrapper[4769]: I1125 10:45:22.289991 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:45:22 crc kubenswrapper[4769]: I1125 10:45:22.290474 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:45:48 crc kubenswrapper[4769]: I1125 10:45:48.494708 4769 scope.go:117] "RemoveContainer" containerID="229f9e06c806c3ae721029fd48a755938572a4b374f11c6269067d347ee130c2" Nov 25 10:45:52 crc kubenswrapper[4769]: I1125 10:45:52.290706 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" 
start-of-body= Nov 25 10:45:52 crc kubenswrapper[4769]: I1125 10:45:52.291238 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:45:52 crc kubenswrapper[4769]: I1125 10:45:52.291294 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" Nov 25 10:45:52 crc kubenswrapper[4769]: I1125 10:45:52.292335 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"72a4bb4929a46b44faab0b605d8ae573c21befda60694925e13067caa64d77a2"} pod="openshift-machine-config-operator/machine-config-daemon-98mzt" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 10:45:52 crc kubenswrapper[4769]: I1125 10:45:52.292382 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" containerID="cri-o://72a4bb4929a46b44faab0b605d8ae573c21befda60694925e13067caa64d77a2" gracePeriod=600 Nov 25 10:45:52 crc kubenswrapper[4769]: E1125 10:45:52.414357 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:45:53 crc kubenswrapper[4769]: I1125 10:45:53.063589 4769 generic.go:334] "Generic (PLEG): container finished" podID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerID="72a4bb4929a46b44faab0b605d8ae573c21befda60694925e13067caa64d77a2" exitCode=0 Nov 25 10:45:53 crc kubenswrapper[4769]: I1125 10:45:53.063629 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" event={"ID":"d58c71b5-5dc4-45c1-9b58-9740a35d2256","Type":"ContainerDied","Data":"72a4bb4929a46b44faab0b605d8ae573c21befda60694925e13067caa64d77a2"} Nov 25 10:45:53 crc kubenswrapper[4769]: I1125 10:45:53.063662 4769 scope.go:117] "RemoveContainer" containerID="2dda2b74a193e53366972e7a46dc5b774e7871f50dd86ba2a2329a05e9765b6f" Nov 25 10:45:53 crc kubenswrapper[4769]: I1125 10:45:53.065719 4769 scope.go:117] "RemoveContainer" containerID="72a4bb4929a46b44faab0b605d8ae573c21befda60694925e13067caa64d77a2" Nov 25 10:45:53 crc kubenswrapper[4769]: E1125 10:45:53.066377 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:46:05 crc kubenswrapper[4769]: I1125 10:46:05.237598 4769 scope.go:117] "RemoveContainer" 
containerID="72a4bb4929a46b44faab0b605d8ae573c21befda60694925e13067caa64d77a2" Nov 25 10:46:05 crc kubenswrapper[4769]: E1125 10:46:05.238577 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:46:19 crc kubenswrapper[4769]: I1125 10:46:19.237639 4769 scope.go:117] "RemoveContainer" containerID="72a4bb4929a46b44faab0b605d8ae573c21befda60694925e13067caa64d77a2" Nov 25 10:46:19 crc kubenswrapper[4769]: E1125 10:46:19.238670 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:46:32 crc kubenswrapper[4769]: I1125 10:46:32.245069 4769 scope.go:117] "RemoveContainer" containerID="72a4bb4929a46b44faab0b605d8ae573c21befda60694925e13067caa64d77a2" Nov 25 10:46:32 crc kubenswrapper[4769]: E1125 10:46:32.245941 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:46:44 crc kubenswrapper[4769]: I1125 10:46:44.237521 4769 scope.go:117] "RemoveContainer" containerID="72a4bb4929a46b44faab0b605d8ae573c21befda60694925e13067caa64d77a2" Nov 25 10:46:44 crc kubenswrapper[4769]: E1125 10:46:44.238319 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:46:57 crc kubenswrapper[4769]: I1125 10:46:57.237903 4769 scope.go:117] "RemoveContainer" containerID="72a4bb4929a46b44faab0b605d8ae573c21befda60694925e13067caa64d77a2" Nov 25 10:46:57 crc kubenswrapper[4769]: E1125 10:46:57.239237 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:47:08 crc kubenswrapper[4769]: I1125 10:47:08.238757 4769 scope.go:117] "RemoveContainer" containerID="72a4bb4929a46b44faab0b605d8ae573c21befda60694925e13067caa64d77a2" Nov 25 10:47:08 crc kubenswrapper[4769]: E1125 10:47:08.239603 4769 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:47:21 crc kubenswrapper[4769]: I1125 10:47:21.237360 4769 scope.go:117] "RemoveContainer" containerID="72a4bb4929a46b44faab0b605d8ae573c21befda60694925e13067caa64d77a2" Nov 25 10:47:21 crc kubenswrapper[4769]: E1125 10:47:21.238253 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:47:36 crc kubenswrapper[4769]: I1125 10:47:36.254550 4769 scope.go:117] "RemoveContainer" containerID="72a4bb4929a46b44faab0b605d8ae573c21befda60694925e13067caa64d77a2" Nov 25 10:47:36 crc kubenswrapper[4769]: E1125 10:47:36.255576 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:47:51 crc kubenswrapper[4769]: I1125 10:47:51.237635 4769 scope.go:117] "RemoveContainer" containerID="72a4bb4929a46b44faab0b605d8ae573c21befda60694925e13067caa64d77a2" Nov 25 10:47:51 crc kubenswrapper[4769]: E1125 10:47:51.238941 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:47:57 crc kubenswrapper[4769]: E1125 10:47:57.607289 4769 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.201:32986->38.102.83.201:43143: write tcp 38.102.83.201:32986->38.102.83.201:43143: write: broken pipe Nov 25 10:48:04 crc kubenswrapper[4769]: I1125 10:48:04.238281 4769 scope.go:117] "RemoveContainer" containerID="72a4bb4929a46b44faab0b605d8ae573c21befda60694925e13067caa64d77a2" Nov 25 10:48:04 crc kubenswrapper[4769]: E1125 10:48:04.239593 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:48:12 crc kubenswrapper[4769]: E1125 10:48:12.282040 4769 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.201:43402->38.102.83.201:43143: write tcp 
38.102.83.201:43402->38.102.83.201:43143: write: broken pipe Nov 25 10:48:16 crc kubenswrapper[4769]: I1125 10:48:16.238082 4769 scope.go:117] "RemoveContainer" containerID="72a4bb4929a46b44faab0b605d8ae573c21befda60694925e13067caa64d77a2" Nov 25 10:48:16 crc kubenswrapper[4769]: E1125 10:48:16.238990 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:48:31 crc kubenswrapper[4769]: I1125 10:48:31.236948 4769 scope.go:117] "RemoveContainer" containerID="72a4bb4929a46b44faab0b605d8ae573c21befda60694925e13067caa64d77a2" Nov 25 10:48:31 crc kubenswrapper[4769]: E1125 10:48:31.237787 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:48:43 crc kubenswrapper[4769]: I1125 10:48:43.237011 4769 scope.go:117] "RemoveContainer" containerID="72a4bb4929a46b44faab0b605d8ae573c21befda60694925e13067caa64d77a2" Nov 25 10:48:43 crc kubenswrapper[4769]: E1125 10:48:43.238042 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:48:54 crc kubenswrapper[4769]: I1125 10:48:54.241171 4769 scope.go:117] "RemoveContainer" containerID="72a4bb4929a46b44faab0b605d8ae573c21befda60694925e13067caa64d77a2" Nov 25 10:48:54 crc kubenswrapper[4769]: E1125 10:48:54.242443 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:49:08 crc kubenswrapper[4769]: I1125 10:49:08.237707 4769 scope.go:117] "RemoveContainer" containerID="72a4bb4929a46b44faab0b605d8ae573c21befda60694925e13067caa64d77a2" Nov 25 10:49:08 crc kubenswrapper[4769]: E1125 10:49:08.238793 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:49:20 crc kubenswrapper[4769]: I1125 10:49:20.237814 4769 scope.go:117] "RemoveContainer" 
containerID="72a4bb4929a46b44faab0b605d8ae573c21befda60694925e13067caa64d77a2" Nov 25 10:49:20 crc kubenswrapper[4769]: E1125 10:49:20.238953 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:49:31 crc kubenswrapper[4769]: I1125 10:49:31.237022 4769 scope.go:117] "RemoveContainer" containerID="72a4bb4929a46b44faab0b605d8ae573c21befda60694925e13067caa64d77a2" Nov 25 10:49:31 crc kubenswrapper[4769]: E1125 10:49:31.237861 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:49:45 crc kubenswrapper[4769]: I1125 10:49:45.237248 4769 scope.go:117] "RemoveContainer" containerID="72a4bb4929a46b44faab0b605d8ae573c21befda60694925e13067caa64d77a2" Nov 25 10:49:45 crc kubenswrapper[4769]: E1125 10:49:45.238240 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:49:59 crc kubenswrapper[4769]: I1125 10:49:59.237616 4769 scope.go:117] "RemoveContainer" containerID="72a4bb4929a46b44faab0b605d8ae573c21befda60694925e13067caa64d77a2" Nov 25 10:49:59 crc kubenswrapper[4769]: E1125 10:49:59.238675 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:50:07 crc kubenswrapper[4769]: I1125 10:50:07.543446 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-lnfvl"] Nov 25 10:50:07 crc kubenswrapper[4769]: E1125 10:50:07.544539 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="56bacf00-8c64-40d7-a1c5-48e833c79140" containerName="collect-profiles" Nov 25 10:50:07 crc kubenswrapper[4769]: I1125 10:50:07.544555 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="56bacf00-8c64-40d7-a1c5-48e833c79140" containerName="collect-profiles" Nov 25 10:50:07 crc kubenswrapper[4769]: I1125 10:50:07.544841 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="56bacf00-8c64-40d7-a1c5-48e833c79140" containerName="collect-profiles" Nov 25 10:50:07 crc kubenswrapper[4769]: I1125 10:50:07.551343 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-lnfvl" Nov 25 10:50:07 crc kubenswrapper[4769]: I1125 10:50:07.575257 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lnfvl"] Nov 25 10:50:07 crc kubenswrapper[4769]: I1125 10:50:07.586995 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/152cb42a-4baf-4eae-b7c9-b6ab56d6d258-utilities\") pod \"certified-operators-lnfvl\" (UID: \"152cb42a-4baf-4eae-b7c9-b6ab56d6d258\") " pod="openshift-marketplace/certified-operators-lnfvl" Nov 25 10:50:07 crc kubenswrapper[4769]: I1125 10:50:07.587061 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fsh84\" (UniqueName: \"kubernetes.io/projected/152cb42a-4baf-4eae-b7c9-b6ab56d6d258-kube-api-access-fsh84\") pod \"certified-operators-lnfvl\" (UID: \"152cb42a-4baf-4eae-b7c9-b6ab56d6d258\") " pod="openshift-marketplace/certified-operators-lnfvl" Nov 25 10:50:07 crc kubenswrapper[4769]: I1125 10:50:07.587347 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/152cb42a-4baf-4eae-b7c9-b6ab56d6d258-catalog-content\") pod \"certified-operators-lnfvl\" (UID: \"152cb42a-4baf-4eae-b7c9-b6ab56d6d258\") " pod="openshift-marketplace/certified-operators-lnfvl" Nov 25 10:50:07 crc kubenswrapper[4769]: I1125 10:50:07.689156 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/152cb42a-4baf-4eae-b7c9-b6ab56d6d258-catalog-content\") pod \"certified-operators-lnfvl\" (UID: \"152cb42a-4baf-4eae-b7c9-b6ab56d6d258\") " pod="openshift-marketplace/certified-operators-lnfvl" Nov 25 10:50:07 crc kubenswrapper[4769]: I1125 10:50:07.689502 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/152cb42a-4baf-4eae-b7c9-b6ab56d6d258-utilities\") pod \"certified-operators-lnfvl\" (UID: \"152cb42a-4baf-4eae-b7c9-b6ab56d6d258\") " pod="openshift-marketplace/certified-operators-lnfvl" Nov 25 10:50:07 crc kubenswrapper[4769]: I1125 10:50:07.689527 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fsh84\" (UniqueName: \"kubernetes.io/projected/152cb42a-4baf-4eae-b7c9-b6ab56d6d258-kube-api-access-fsh84\") pod \"certified-operators-lnfvl\" (UID: \"152cb42a-4baf-4eae-b7c9-b6ab56d6d258\") " pod="openshift-marketplace/certified-operators-lnfvl" Nov 25 10:50:07 crc kubenswrapper[4769]: I1125 10:50:07.689889 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/152cb42a-4baf-4eae-b7c9-b6ab56d6d258-utilities\") pod \"certified-operators-lnfvl\" (UID: \"152cb42a-4baf-4eae-b7c9-b6ab56d6d258\") " pod="openshift-marketplace/certified-operators-lnfvl" Nov 25 10:50:07 crc kubenswrapper[4769]: I1125 10:50:07.690144 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/152cb42a-4baf-4eae-b7c9-b6ab56d6d258-catalog-content\") pod \"certified-operators-lnfvl\" (UID: \"152cb42a-4baf-4eae-b7c9-b6ab56d6d258\") " pod="openshift-marketplace/certified-operators-lnfvl" Nov 25 10:50:07 crc kubenswrapper[4769]: I1125 10:50:07.720195 4769 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-fsh84\" (UniqueName: \"kubernetes.io/projected/152cb42a-4baf-4eae-b7c9-b6ab56d6d258-kube-api-access-fsh84\") pod \"certified-operators-lnfvl\" (UID: \"152cb42a-4baf-4eae-b7c9-b6ab56d6d258\") " pod="openshift-marketplace/certified-operators-lnfvl" Nov 25 10:50:07 crc kubenswrapper[4769]: I1125 10:50:07.877597 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lnfvl" Nov 25 10:50:08 crc kubenswrapper[4769]: I1125 10:50:08.459212 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lnfvl"] Nov 25 10:50:09 crc kubenswrapper[4769]: I1125 10:50:09.306789 4769 generic.go:334] "Generic (PLEG): container finished" podID="152cb42a-4baf-4eae-b7c9-b6ab56d6d258" containerID="fb53acb257f54d5b1e979424e6457e9ae5671f226f6365face586a382f2b786a" exitCode=0 Nov 25 10:50:09 crc kubenswrapper[4769]: I1125 10:50:09.306917 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lnfvl" event={"ID":"152cb42a-4baf-4eae-b7c9-b6ab56d6d258","Type":"ContainerDied","Data":"fb53acb257f54d5b1e979424e6457e9ae5671f226f6365face586a382f2b786a"} Nov 25 10:50:09 crc kubenswrapper[4769]: I1125 10:50:09.307509 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lnfvl" event={"ID":"152cb42a-4baf-4eae-b7c9-b6ab56d6d258","Type":"ContainerStarted","Data":"d2bfe4743bfe4797f4e2ee6cb4aacc06c6f59218aca0986543da7cf02ec36487"} Nov 25 10:50:09 crc kubenswrapper[4769]: I1125 10:50:09.310374 4769 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 10:50:10 crc kubenswrapper[4769]: I1125 10:50:10.324296 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lnfvl" event={"ID":"152cb42a-4baf-4eae-b7c9-b6ab56d6d258","Type":"ContainerStarted","Data":"48d517e5feaa61d92f164ec866787c6b29e033626dc6d7b732fadbc29d6b4f60"} Nov 25 10:50:12 crc kubenswrapper[4769]: I1125 10:50:12.351858 4769 generic.go:334] "Generic (PLEG): container finished" podID="152cb42a-4baf-4eae-b7c9-b6ab56d6d258" containerID="48d517e5feaa61d92f164ec866787c6b29e033626dc6d7b732fadbc29d6b4f60" exitCode=0 Nov 25 10:50:12 crc kubenswrapper[4769]: I1125 10:50:12.352037 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lnfvl" event={"ID":"152cb42a-4baf-4eae-b7c9-b6ab56d6d258","Type":"ContainerDied","Data":"48d517e5feaa61d92f164ec866787c6b29e033626dc6d7b732fadbc29d6b4f60"} Nov 25 10:50:13 crc kubenswrapper[4769]: I1125 10:50:13.366858 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lnfvl" event={"ID":"152cb42a-4baf-4eae-b7c9-b6ab56d6d258","Type":"ContainerStarted","Data":"679bc2c37b9648bfa90056c76fc6ada90f99c9d9874612b0ec0545bc5d5b4a12"} Nov 25 10:50:13 crc kubenswrapper[4769]: I1125 10:50:13.395114 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-lnfvl" podStartSLOduration=2.92181727 podStartE2EDuration="6.395094427s" podCreationTimestamp="2025-11-25 10:50:07 +0000 UTC" firstStartedPulling="2025-11-25 10:50:09.310140406 +0000 UTC m=+3957.895112719" lastFinishedPulling="2025-11-25 10:50:12.783417543 +0000 UTC m=+3961.368389876" observedRunningTime="2025-11-25 10:50:13.393420993 +0000 UTC m=+3961.978393316" watchObservedRunningTime="2025-11-25 
10:50:13.395094427 +0000 UTC m=+3961.980066750" Nov 25 10:50:14 crc kubenswrapper[4769]: I1125 10:50:14.237507 4769 scope.go:117] "RemoveContainer" containerID="72a4bb4929a46b44faab0b605d8ae573c21befda60694925e13067caa64d77a2" Nov 25 10:50:14 crc kubenswrapper[4769]: E1125 10:50:14.238176 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:50:17 crc kubenswrapper[4769]: I1125 10:50:17.878340 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-lnfvl" Nov 25 10:50:17 crc kubenswrapper[4769]: I1125 10:50:17.878683 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-lnfvl" Nov 25 10:50:18 crc kubenswrapper[4769]: I1125 10:50:18.139894 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-lnfvl" Nov 25 10:50:18 crc kubenswrapper[4769]: I1125 10:50:18.470515 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-lnfvl" Nov 25 10:50:18 crc kubenswrapper[4769]: I1125 10:50:18.533998 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lnfvl"] Nov 25 10:50:20 crc kubenswrapper[4769]: I1125 10:50:20.448134 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-lnfvl" podUID="152cb42a-4baf-4eae-b7c9-b6ab56d6d258" containerName="registry-server" containerID="cri-o://679bc2c37b9648bfa90056c76fc6ada90f99c9d9874612b0ec0545bc5d5b4a12" gracePeriod=2 Nov 25 10:50:21 crc kubenswrapper[4769]: I1125 10:50:21.017728 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-lnfvl" Nov 25 10:50:21 crc kubenswrapper[4769]: I1125 10:50:21.074787 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/152cb42a-4baf-4eae-b7c9-b6ab56d6d258-catalog-content\") pod \"152cb42a-4baf-4eae-b7c9-b6ab56d6d258\" (UID: \"152cb42a-4baf-4eae-b7c9-b6ab56d6d258\") " Nov 25 10:50:21 crc kubenswrapper[4769]: I1125 10:50:21.105198 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/152cb42a-4baf-4eae-b7c9-b6ab56d6d258-utilities\") pod \"152cb42a-4baf-4eae-b7c9-b6ab56d6d258\" (UID: \"152cb42a-4baf-4eae-b7c9-b6ab56d6d258\") " Nov 25 10:50:21 crc kubenswrapper[4769]: I1125 10:50:21.105357 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fsh84\" (UniqueName: \"kubernetes.io/projected/152cb42a-4baf-4eae-b7c9-b6ab56d6d258-kube-api-access-fsh84\") pod \"152cb42a-4baf-4eae-b7c9-b6ab56d6d258\" (UID: \"152cb42a-4baf-4eae-b7c9-b6ab56d6d258\") " Nov 25 10:50:21 crc kubenswrapper[4769]: I1125 10:50:21.110470 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/152cb42a-4baf-4eae-b7c9-b6ab56d6d258-utilities" (OuterVolumeSpecName: "utilities") pod "152cb42a-4baf-4eae-b7c9-b6ab56d6d258" (UID: "152cb42a-4baf-4eae-b7c9-b6ab56d6d258"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:50:21 crc kubenswrapper[4769]: I1125 10:50:21.119184 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/152cb42a-4baf-4eae-b7c9-b6ab56d6d258-kube-api-access-fsh84" (OuterVolumeSpecName: "kube-api-access-fsh84") pod "152cb42a-4baf-4eae-b7c9-b6ab56d6d258" (UID: "152cb42a-4baf-4eae-b7c9-b6ab56d6d258"). InnerVolumeSpecName "kube-api-access-fsh84". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:50:21 crc kubenswrapper[4769]: I1125 10:50:21.132898 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/152cb42a-4baf-4eae-b7c9-b6ab56d6d258-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "152cb42a-4baf-4eae-b7c9-b6ab56d6d258" (UID: "152cb42a-4baf-4eae-b7c9-b6ab56d6d258"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:50:21 crc kubenswrapper[4769]: I1125 10:50:21.211494 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/152cb42a-4baf-4eae-b7c9-b6ab56d6d258-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:50:21 crc kubenswrapper[4769]: I1125 10:50:21.211545 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fsh84\" (UniqueName: \"kubernetes.io/projected/152cb42a-4baf-4eae-b7c9-b6ab56d6d258-kube-api-access-fsh84\") on node \"crc\" DevicePath \"\"" Nov 25 10:50:21 crc kubenswrapper[4769]: I1125 10:50:21.211560 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/152cb42a-4baf-4eae-b7c9-b6ab56d6d258-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:50:21 crc kubenswrapper[4769]: I1125 10:50:21.460892 4769 generic.go:334] "Generic (PLEG): container finished" podID="152cb42a-4baf-4eae-b7c9-b6ab56d6d258" containerID="679bc2c37b9648bfa90056c76fc6ada90f99c9d9874612b0ec0545bc5d5b4a12" exitCode=0 Nov 25 10:50:21 crc kubenswrapper[4769]: I1125 10:50:21.461224 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lnfvl" event={"ID":"152cb42a-4baf-4eae-b7c9-b6ab56d6d258","Type":"ContainerDied","Data":"679bc2c37b9648bfa90056c76fc6ada90f99c9d9874612b0ec0545bc5d5b4a12"} Nov 25 10:50:21 crc kubenswrapper[4769]: I1125 10:50:21.461299 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lnfvl" event={"ID":"152cb42a-4baf-4eae-b7c9-b6ab56d6d258","Type":"ContainerDied","Data":"d2bfe4743bfe4797f4e2ee6cb4aacc06c6f59218aca0986543da7cf02ec36487"} Nov 25 10:50:21 crc kubenswrapper[4769]: I1125 10:50:21.461316 4769 scope.go:117] "RemoveContainer" containerID="679bc2c37b9648bfa90056c76fc6ada90f99c9d9874612b0ec0545bc5d5b4a12" Nov 25 10:50:21 crc kubenswrapper[4769]: I1125 10:50:21.461241 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-lnfvl" Nov 25 10:50:21 crc kubenswrapper[4769]: I1125 10:50:21.486487 4769 scope.go:117] "RemoveContainer" containerID="48d517e5feaa61d92f164ec866787c6b29e033626dc6d7b732fadbc29d6b4f60" Nov 25 10:50:21 crc kubenswrapper[4769]: I1125 10:50:21.515446 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lnfvl"] Nov 25 10:50:21 crc kubenswrapper[4769]: I1125 10:50:21.519340 4769 scope.go:117] "RemoveContainer" containerID="fb53acb257f54d5b1e979424e6457e9ae5671f226f6365face586a382f2b786a" Nov 25 10:50:21 crc kubenswrapper[4769]: I1125 10:50:21.527198 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-lnfvl"] Nov 25 10:50:21 crc kubenswrapper[4769]: I1125 10:50:21.600333 4769 scope.go:117] "RemoveContainer" containerID="679bc2c37b9648bfa90056c76fc6ada90f99c9d9874612b0ec0545bc5d5b4a12" Nov 25 10:50:21 crc kubenswrapper[4769]: E1125 10:50:21.600710 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"679bc2c37b9648bfa90056c76fc6ada90f99c9d9874612b0ec0545bc5d5b4a12\": container with ID starting with 679bc2c37b9648bfa90056c76fc6ada90f99c9d9874612b0ec0545bc5d5b4a12 not found: ID does not exist" containerID="679bc2c37b9648bfa90056c76fc6ada90f99c9d9874612b0ec0545bc5d5b4a12" Nov 25 10:50:21 crc kubenswrapper[4769]: I1125 10:50:21.600739 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"679bc2c37b9648bfa90056c76fc6ada90f99c9d9874612b0ec0545bc5d5b4a12"} err="failed to get container status \"679bc2c37b9648bfa90056c76fc6ada90f99c9d9874612b0ec0545bc5d5b4a12\": rpc error: code = NotFound desc = could not find container \"679bc2c37b9648bfa90056c76fc6ada90f99c9d9874612b0ec0545bc5d5b4a12\": container with ID starting with 679bc2c37b9648bfa90056c76fc6ada90f99c9d9874612b0ec0545bc5d5b4a12 not found: ID does not exist" Nov 25 10:50:21 crc kubenswrapper[4769]: I1125 10:50:21.600760 4769 scope.go:117] "RemoveContainer" containerID="48d517e5feaa61d92f164ec866787c6b29e033626dc6d7b732fadbc29d6b4f60" Nov 25 10:50:21 crc kubenswrapper[4769]: E1125 10:50:21.600975 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"48d517e5feaa61d92f164ec866787c6b29e033626dc6d7b732fadbc29d6b4f60\": container with ID starting with 48d517e5feaa61d92f164ec866787c6b29e033626dc6d7b732fadbc29d6b4f60 not found: ID does not exist" containerID="48d517e5feaa61d92f164ec866787c6b29e033626dc6d7b732fadbc29d6b4f60" Nov 25 10:50:21 crc kubenswrapper[4769]: I1125 10:50:21.600994 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"48d517e5feaa61d92f164ec866787c6b29e033626dc6d7b732fadbc29d6b4f60"} err="failed to get container status \"48d517e5feaa61d92f164ec866787c6b29e033626dc6d7b732fadbc29d6b4f60\": rpc error: code = NotFound desc = could not find container \"48d517e5feaa61d92f164ec866787c6b29e033626dc6d7b732fadbc29d6b4f60\": container with ID starting with 48d517e5feaa61d92f164ec866787c6b29e033626dc6d7b732fadbc29d6b4f60 not found: ID does not exist" Nov 25 10:50:21 crc kubenswrapper[4769]: I1125 10:50:21.601006 4769 scope.go:117] "RemoveContainer" containerID="fb53acb257f54d5b1e979424e6457e9ae5671f226f6365face586a382f2b786a" Nov 25 10:50:21 crc kubenswrapper[4769]: E1125 10:50:21.601215 4769 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"fb53acb257f54d5b1e979424e6457e9ae5671f226f6365face586a382f2b786a\": container with ID starting with fb53acb257f54d5b1e979424e6457e9ae5671f226f6365face586a382f2b786a not found: ID does not exist" containerID="fb53acb257f54d5b1e979424e6457e9ae5671f226f6365face586a382f2b786a" Nov 25 10:50:21 crc kubenswrapper[4769]: I1125 10:50:21.601238 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fb53acb257f54d5b1e979424e6457e9ae5671f226f6365face586a382f2b786a"} err="failed to get container status \"fb53acb257f54d5b1e979424e6457e9ae5671f226f6365face586a382f2b786a\": rpc error: code = NotFound desc = could not find container \"fb53acb257f54d5b1e979424e6457e9ae5671f226f6365face586a382f2b786a\": container with ID starting with fb53acb257f54d5b1e979424e6457e9ae5671f226f6365face586a382f2b786a not found: ID does not exist" Nov 25 10:50:22 crc kubenswrapper[4769]: I1125 10:50:22.253226 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="152cb42a-4baf-4eae-b7c9-b6ab56d6d258" path="/var/lib/kubelet/pods/152cb42a-4baf-4eae-b7c9-b6ab56d6d258/volumes" Nov 25 10:50:25 crc kubenswrapper[4769]: I1125 10:50:25.236911 4769 scope.go:117] "RemoveContainer" containerID="72a4bb4929a46b44faab0b605d8ae573c21befda60694925e13067caa64d77a2" Nov 25 10:50:25 crc kubenswrapper[4769]: E1125 10:50:25.238053 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:50:39 crc kubenswrapper[4769]: I1125 10:50:39.239128 4769 scope.go:117] "RemoveContainer" containerID="72a4bb4929a46b44faab0b605d8ae573c21befda60694925e13067caa64d77a2" Nov 25 10:50:39 crc kubenswrapper[4769]: E1125 10:50:39.240077 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:50:52 crc kubenswrapper[4769]: I1125 10:50:52.244935 4769 scope.go:117] "RemoveContainer" containerID="72a4bb4929a46b44faab0b605d8ae573c21befda60694925e13067caa64d77a2" Nov 25 10:50:52 crc kubenswrapper[4769]: E1125 10:50:52.245692 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:51:03 crc kubenswrapper[4769]: I1125 10:51:03.236712 4769 scope.go:117] "RemoveContainer" containerID="72a4bb4929a46b44faab0b605d8ae573c21befda60694925e13067caa64d77a2" Nov 25 10:51:03 crc kubenswrapper[4769]: I1125 10:51:03.993446 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-98mzt" event={"ID":"d58c71b5-5dc4-45c1-9b58-9740a35d2256","Type":"ContainerStarted","Data":"6ee7bc9266cc374f05539fb2582761256290e07c7dda15eb655fce905ed00be0"} Nov 25 10:53:22 crc kubenswrapper[4769]: I1125 10:53:22.290552 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:53:22 crc kubenswrapper[4769]: I1125 10:53:22.291149 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:53:52 crc kubenswrapper[4769]: I1125 10:53:52.290388 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:53:52 crc kubenswrapper[4769]: I1125 10:53:52.292272 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:54:22 crc kubenswrapper[4769]: I1125 10:54:22.290374 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:54:22 crc kubenswrapper[4769]: I1125 10:54:22.291242 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:54:22 crc kubenswrapper[4769]: I1125 10:54:22.291709 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" Nov 25 10:54:22 crc kubenswrapper[4769]: I1125 10:54:22.293172 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6ee7bc9266cc374f05539fb2582761256290e07c7dda15eb655fce905ed00be0"} pod="openshift-machine-config-operator/machine-config-daemon-98mzt" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 10:54:22 crc kubenswrapper[4769]: I1125 10:54:22.293307 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" containerID="cri-o://6ee7bc9266cc374f05539fb2582761256290e07c7dda15eb655fce905ed00be0" gracePeriod=600 Nov 25 10:54:22 crc kubenswrapper[4769]: I1125 
10:54:22.971006 4769 generic.go:334] "Generic (PLEG): container finished" podID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerID="6ee7bc9266cc374f05539fb2582761256290e07c7dda15eb655fce905ed00be0" exitCode=0 Nov 25 10:54:22 crc kubenswrapper[4769]: I1125 10:54:22.971127 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" event={"ID":"d58c71b5-5dc4-45c1-9b58-9740a35d2256","Type":"ContainerDied","Data":"6ee7bc9266cc374f05539fb2582761256290e07c7dda15eb655fce905ed00be0"} Nov 25 10:54:22 crc kubenswrapper[4769]: I1125 10:54:22.971463 4769 scope.go:117] "RemoveContainer" containerID="72a4bb4929a46b44faab0b605d8ae573c21befda60694925e13067caa64d77a2" Nov 25 10:54:23 crc kubenswrapper[4769]: I1125 10:54:23.984659 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" event={"ID":"d58c71b5-5dc4-45c1-9b58-9740a35d2256","Type":"ContainerStarted","Data":"319debd1d2820bc9978952132ea229b634e7709f812ece9fe2a1e10c727e5d05"} Nov 25 10:54:35 crc kubenswrapper[4769]: I1125 10:54:35.761339 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-2l76j"] Nov 25 10:54:35 crc kubenswrapper[4769]: E1125 10:54:35.762608 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="152cb42a-4baf-4eae-b7c9-b6ab56d6d258" containerName="extract-utilities" Nov 25 10:54:35 crc kubenswrapper[4769]: I1125 10:54:35.762629 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="152cb42a-4baf-4eae-b7c9-b6ab56d6d258" containerName="extract-utilities" Nov 25 10:54:35 crc kubenswrapper[4769]: E1125 10:54:35.762655 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="152cb42a-4baf-4eae-b7c9-b6ab56d6d258" containerName="extract-content" Nov 25 10:54:35 crc kubenswrapper[4769]: I1125 10:54:35.762664 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="152cb42a-4baf-4eae-b7c9-b6ab56d6d258" containerName="extract-content" Nov 25 10:54:35 crc kubenswrapper[4769]: E1125 10:54:35.762682 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="152cb42a-4baf-4eae-b7c9-b6ab56d6d258" containerName="registry-server" Nov 25 10:54:35 crc kubenswrapper[4769]: I1125 10:54:35.762713 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="152cb42a-4baf-4eae-b7c9-b6ab56d6d258" containerName="registry-server" Nov 25 10:54:35 crc kubenswrapper[4769]: I1125 10:54:35.763056 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="152cb42a-4baf-4eae-b7c9-b6ab56d6d258" containerName="registry-server" Nov 25 10:54:35 crc kubenswrapper[4769]: I1125 10:54:35.765151 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-2l76j" Nov 25 10:54:35 crc kubenswrapper[4769]: I1125 10:54:35.778200 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2l76j"] Nov 25 10:54:35 crc kubenswrapper[4769]: I1125 10:54:35.846463 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2fd904e3-ad5c-4b23-8a7d-52785a3848c7-catalog-content\") pod \"community-operators-2l76j\" (UID: \"2fd904e3-ad5c-4b23-8a7d-52785a3848c7\") " pod="openshift-marketplace/community-operators-2l76j" Nov 25 10:54:35 crc kubenswrapper[4769]: I1125 10:54:35.846641 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2fd904e3-ad5c-4b23-8a7d-52785a3848c7-utilities\") pod \"community-operators-2l76j\" (UID: \"2fd904e3-ad5c-4b23-8a7d-52785a3848c7\") " pod="openshift-marketplace/community-operators-2l76j" Nov 25 10:54:35 crc kubenswrapper[4769]: I1125 10:54:35.846681 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x7pmx\" (UniqueName: \"kubernetes.io/projected/2fd904e3-ad5c-4b23-8a7d-52785a3848c7-kube-api-access-x7pmx\") pod \"community-operators-2l76j\" (UID: \"2fd904e3-ad5c-4b23-8a7d-52785a3848c7\") " pod="openshift-marketplace/community-operators-2l76j" Nov 25 10:54:35 crc kubenswrapper[4769]: I1125 10:54:35.948321 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2fd904e3-ad5c-4b23-8a7d-52785a3848c7-catalog-content\") pod \"community-operators-2l76j\" (UID: \"2fd904e3-ad5c-4b23-8a7d-52785a3848c7\") " pod="openshift-marketplace/community-operators-2l76j" Nov 25 10:54:35 crc kubenswrapper[4769]: I1125 10:54:35.948430 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2fd904e3-ad5c-4b23-8a7d-52785a3848c7-utilities\") pod \"community-operators-2l76j\" (UID: \"2fd904e3-ad5c-4b23-8a7d-52785a3848c7\") " pod="openshift-marketplace/community-operators-2l76j" Nov 25 10:54:35 crc kubenswrapper[4769]: I1125 10:54:35.948456 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x7pmx\" (UniqueName: \"kubernetes.io/projected/2fd904e3-ad5c-4b23-8a7d-52785a3848c7-kube-api-access-x7pmx\") pod \"community-operators-2l76j\" (UID: \"2fd904e3-ad5c-4b23-8a7d-52785a3848c7\") " pod="openshift-marketplace/community-operators-2l76j" Nov 25 10:54:35 crc kubenswrapper[4769]: I1125 10:54:35.948928 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2fd904e3-ad5c-4b23-8a7d-52785a3848c7-catalog-content\") pod \"community-operators-2l76j\" (UID: \"2fd904e3-ad5c-4b23-8a7d-52785a3848c7\") " pod="openshift-marketplace/community-operators-2l76j" Nov 25 10:54:35 crc kubenswrapper[4769]: I1125 10:54:35.948990 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2fd904e3-ad5c-4b23-8a7d-52785a3848c7-utilities\") pod \"community-operators-2l76j\" (UID: \"2fd904e3-ad5c-4b23-8a7d-52785a3848c7\") " pod="openshift-marketplace/community-operators-2l76j" Nov 25 10:54:35 crc kubenswrapper[4769]: I1125 10:54:35.972979 4769 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-x7pmx\" (UniqueName: \"kubernetes.io/projected/2fd904e3-ad5c-4b23-8a7d-52785a3848c7-kube-api-access-x7pmx\") pod \"community-operators-2l76j\" (UID: \"2fd904e3-ad5c-4b23-8a7d-52785a3848c7\") " pod="openshift-marketplace/community-operators-2l76j" Nov 25 10:54:36 crc kubenswrapper[4769]: I1125 10:54:36.090689 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2l76j" Nov 25 10:54:36 crc kubenswrapper[4769]: I1125 10:54:36.714664 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2l76j"] Nov 25 10:54:36 crc kubenswrapper[4769]: W1125 10:54:36.726753 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2fd904e3_ad5c_4b23_8a7d_52785a3848c7.slice/crio-fe228ef8e421e689bb6beda61cb40ec2f75ff27eed625826eeba86a93370ea89 WatchSource:0}: Error finding container fe228ef8e421e689bb6beda61cb40ec2f75ff27eed625826eeba86a93370ea89: Status 404 returned error can't find the container with id fe228ef8e421e689bb6beda61cb40ec2f75ff27eed625826eeba86a93370ea89 Nov 25 10:54:37 crc kubenswrapper[4769]: I1125 10:54:37.119288 4769 generic.go:334] "Generic (PLEG): container finished" podID="2fd904e3-ad5c-4b23-8a7d-52785a3848c7" containerID="3d93bf10d2aeee847d6fc618924072ac5e40d0d01b5a33e416085e647ff06548" exitCode=0 Nov 25 10:54:37 crc kubenswrapper[4769]: I1125 10:54:37.119334 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2l76j" event={"ID":"2fd904e3-ad5c-4b23-8a7d-52785a3848c7","Type":"ContainerDied","Data":"3d93bf10d2aeee847d6fc618924072ac5e40d0d01b5a33e416085e647ff06548"} Nov 25 10:54:37 crc kubenswrapper[4769]: I1125 10:54:37.119362 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2l76j" event={"ID":"2fd904e3-ad5c-4b23-8a7d-52785a3848c7","Type":"ContainerStarted","Data":"fe228ef8e421e689bb6beda61cb40ec2f75ff27eed625826eeba86a93370ea89"} Nov 25 10:54:38 crc kubenswrapper[4769]: I1125 10:54:38.134203 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2l76j" event={"ID":"2fd904e3-ad5c-4b23-8a7d-52785a3848c7","Type":"ContainerStarted","Data":"a13a410a917a13dc6de7daaebefa99ac50864ca8edcdf47cc7d822768294c0f7"} Nov 25 10:54:40 crc kubenswrapper[4769]: I1125 10:54:40.165540 4769 generic.go:334] "Generic (PLEG): container finished" podID="2fd904e3-ad5c-4b23-8a7d-52785a3848c7" containerID="a13a410a917a13dc6de7daaebefa99ac50864ca8edcdf47cc7d822768294c0f7" exitCode=0 Nov 25 10:54:40 crc kubenswrapper[4769]: I1125 10:54:40.165589 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2l76j" event={"ID":"2fd904e3-ad5c-4b23-8a7d-52785a3848c7","Type":"ContainerDied","Data":"a13a410a917a13dc6de7daaebefa99ac50864ca8edcdf47cc7d822768294c0f7"} Nov 25 10:54:41 crc kubenswrapper[4769]: I1125 10:54:41.179760 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2l76j" event={"ID":"2fd904e3-ad5c-4b23-8a7d-52785a3848c7","Type":"ContainerStarted","Data":"3e7b666cb71632e4ddb9f5270055041a50504567c8c059915363af24fed28259"} Nov 25 10:54:41 crc kubenswrapper[4769]: I1125 10:54:41.209957 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-2l76j" 
podStartSLOduration=2.720670965 podStartE2EDuration="6.209932368s" podCreationTimestamp="2025-11-25 10:54:35 +0000 UTC" firstStartedPulling="2025-11-25 10:54:37.121555814 +0000 UTC m=+4225.706528127" lastFinishedPulling="2025-11-25 10:54:40.610817217 +0000 UTC m=+4229.195789530" observedRunningTime="2025-11-25 10:54:41.205680995 +0000 UTC m=+4229.790653328" watchObservedRunningTime="2025-11-25 10:54:41.209932368 +0000 UTC m=+4229.794904691" Nov 25 10:54:46 crc kubenswrapper[4769]: I1125 10:54:46.091233 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-2l76j" Nov 25 10:54:46 crc kubenswrapper[4769]: I1125 10:54:46.092420 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-2l76j" Nov 25 10:54:46 crc kubenswrapper[4769]: I1125 10:54:46.433547 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-2l76j" Nov 25 10:54:47 crc kubenswrapper[4769]: I1125 10:54:47.327015 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-2l76j" Nov 25 10:54:47 crc kubenswrapper[4769]: I1125 10:54:47.400206 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-2l76j"] Nov 25 10:54:49 crc kubenswrapper[4769]: I1125 10:54:49.274784 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-2l76j" podUID="2fd904e3-ad5c-4b23-8a7d-52785a3848c7" containerName="registry-server" containerID="cri-o://3e7b666cb71632e4ddb9f5270055041a50504567c8c059915363af24fed28259" gracePeriod=2 Nov 25 10:54:49 crc kubenswrapper[4769]: I1125 10:54:49.888219 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2l76j" Nov 25 10:54:49 crc kubenswrapper[4769]: I1125 10:54:49.950362 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7pmx\" (UniqueName: \"kubernetes.io/projected/2fd904e3-ad5c-4b23-8a7d-52785a3848c7-kube-api-access-x7pmx\") pod \"2fd904e3-ad5c-4b23-8a7d-52785a3848c7\" (UID: \"2fd904e3-ad5c-4b23-8a7d-52785a3848c7\") " Nov 25 10:54:49 crc kubenswrapper[4769]: I1125 10:54:49.950567 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2fd904e3-ad5c-4b23-8a7d-52785a3848c7-utilities\") pod \"2fd904e3-ad5c-4b23-8a7d-52785a3848c7\" (UID: \"2fd904e3-ad5c-4b23-8a7d-52785a3848c7\") " Nov 25 10:54:49 crc kubenswrapper[4769]: I1125 10:54:49.950696 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2fd904e3-ad5c-4b23-8a7d-52785a3848c7-catalog-content\") pod \"2fd904e3-ad5c-4b23-8a7d-52785a3848c7\" (UID: \"2fd904e3-ad5c-4b23-8a7d-52785a3848c7\") " Nov 25 10:54:49 crc kubenswrapper[4769]: I1125 10:54:49.951778 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2fd904e3-ad5c-4b23-8a7d-52785a3848c7-utilities" (OuterVolumeSpecName: "utilities") pod "2fd904e3-ad5c-4b23-8a7d-52785a3848c7" (UID: "2fd904e3-ad5c-4b23-8a7d-52785a3848c7"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:54:49 crc kubenswrapper[4769]: I1125 10:54:49.952586 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2fd904e3-ad5c-4b23-8a7d-52785a3848c7-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:54:49 crc kubenswrapper[4769]: I1125 10:54:49.957865 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2fd904e3-ad5c-4b23-8a7d-52785a3848c7-kube-api-access-x7pmx" (OuterVolumeSpecName: "kube-api-access-x7pmx") pod "2fd904e3-ad5c-4b23-8a7d-52785a3848c7" (UID: "2fd904e3-ad5c-4b23-8a7d-52785a3848c7"). InnerVolumeSpecName "kube-api-access-x7pmx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:54:50 crc kubenswrapper[4769]: I1125 10:54:50.006615 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2fd904e3-ad5c-4b23-8a7d-52785a3848c7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2fd904e3-ad5c-4b23-8a7d-52785a3848c7" (UID: "2fd904e3-ad5c-4b23-8a7d-52785a3848c7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:54:50 crc kubenswrapper[4769]: I1125 10:54:50.054098 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2fd904e3-ad5c-4b23-8a7d-52785a3848c7-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:54:50 crc kubenswrapper[4769]: I1125 10:54:50.054138 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7pmx\" (UniqueName: \"kubernetes.io/projected/2fd904e3-ad5c-4b23-8a7d-52785a3848c7-kube-api-access-x7pmx\") on node \"crc\" DevicePath \"\"" Nov 25 10:54:50 crc kubenswrapper[4769]: I1125 10:54:50.291434 4769 generic.go:334] "Generic (PLEG): container finished" podID="2fd904e3-ad5c-4b23-8a7d-52785a3848c7" containerID="3e7b666cb71632e4ddb9f5270055041a50504567c8c059915363af24fed28259" exitCode=0 Nov 25 10:54:50 crc kubenswrapper[4769]: I1125 10:54:50.291512 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-2l76j" Nov 25 10:54:50 crc kubenswrapper[4769]: I1125 10:54:50.291519 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2l76j" event={"ID":"2fd904e3-ad5c-4b23-8a7d-52785a3848c7","Type":"ContainerDied","Data":"3e7b666cb71632e4ddb9f5270055041a50504567c8c059915363af24fed28259"} Nov 25 10:54:50 crc kubenswrapper[4769]: I1125 10:54:50.292237 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2l76j" event={"ID":"2fd904e3-ad5c-4b23-8a7d-52785a3848c7","Type":"ContainerDied","Data":"fe228ef8e421e689bb6beda61cb40ec2f75ff27eed625826eeba86a93370ea89"} Nov 25 10:54:50 crc kubenswrapper[4769]: I1125 10:54:50.292261 4769 scope.go:117] "RemoveContainer" containerID="3e7b666cb71632e4ddb9f5270055041a50504567c8c059915363af24fed28259" Nov 25 10:54:50 crc kubenswrapper[4769]: I1125 10:54:50.318063 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-2l76j"] Nov 25 10:54:50 crc kubenswrapper[4769]: I1125 10:54:50.330710 4769 scope.go:117] "RemoveContainer" containerID="a13a410a917a13dc6de7daaebefa99ac50864ca8edcdf47cc7d822768294c0f7" Nov 25 10:54:50 crc kubenswrapper[4769]: I1125 10:54:50.333981 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-2l76j"] Nov 25 10:54:50 crc kubenswrapper[4769]: I1125 10:54:50.363685 4769 scope.go:117] "RemoveContainer" containerID="3d93bf10d2aeee847d6fc618924072ac5e40d0d01b5a33e416085e647ff06548" Nov 25 10:54:50 crc kubenswrapper[4769]: I1125 10:54:50.420599 4769 scope.go:117] "RemoveContainer" containerID="3e7b666cb71632e4ddb9f5270055041a50504567c8c059915363af24fed28259" Nov 25 10:54:50 crc kubenswrapper[4769]: E1125 10:54:50.422151 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3e7b666cb71632e4ddb9f5270055041a50504567c8c059915363af24fed28259\": container with ID starting with 3e7b666cb71632e4ddb9f5270055041a50504567c8c059915363af24fed28259 not found: ID does not exist" containerID="3e7b666cb71632e4ddb9f5270055041a50504567c8c059915363af24fed28259" Nov 25 10:54:50 crc kubenswrapper[4769]: I1125 10:54:50.422191 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3e7b666cb71632e4ddb9f5270055041a50504567c8c059915363af24fed28259"} err="failed to get container status \"3e7b666cb71632e4ddb9f5270055041a50504567c8c059915363af24fed28259\": rpc error: code = NotFound desc = could not find container \"3e7b666cb71632e4ddb9f5270055041a50504567c8c059915363af24fed28259\": container with ID starting with 3e7b666cb71632e4ddb9f5270055041a50504567c8c059915363af24fed28259 not found: ID does not exist" Nov 25 10:54:50 crc kubenswrapper[4769]: I1125 10:54:50.422219 4769 scope.go:117] "RemoveContainer" containerID="a13a410a917a13dc6de7daaebefa99ac50864ca8edcdf47cc7d822768294c0f7" Nov 25 10:54:50 crc kubenswrapper[4769]: E1125 10:54:50.422572 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a13a410a917a13dc6de7daaebefa99ac50864ca8edcdf47cc7d822768294c0f7\": container with ID starting with a13a410a917a13dc6de7daaebefa99ac50864ca8edcdf47cc7d822768294c0f7 not found: ID does not exist" containerID="a13a410a917a13dc6de7daaebefa99ac50864ca8edcdf47cc7d822768294c0f7" Nov 25 10:54:50 crc kubenswrapper[4769]: I1125 10:54:50.422621 4769 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a13a410a917a13dc6de7daaebefa99ac50864ca8edcdf47cc7d822768294c0f7"} err="failed to get container status \"a13a410a917a13dc6de7daaebefa99ac50864ca8edcdf47cc7d822768294c0f7\": rpc error: code = NotFound desc = could not find container \"a13a410a917a13dc6de7daaebefa99ac50864ca8edcdf47cc7d822768294c0f7\": container with ID starting with a13a410a917a13dc6de7daaebefa99ac50864ca8edcdf47cc7d822768294c0f7 not found: ID does not exist" Nov 25 10:54:50 crc kubenswrapper[4769]: I1125 10:54:50.422641 4769 scope.go:117] "RemoveContainer" containerID="3d93bf10d2aeee847d6fc618924072ac5e40d0d01b5a33e416085e647ff06548" Nov 25 10:54:50 crc kubenswrapper[4769]: E1125 10:54:50.422915 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3d93bf10d2aeee847d6fc618924072ac5e40d0d01b5a33e416085e647ff06548\": container with ID starting with 3d93bf10d2aeee847d6fc618924072ac5e40d0d01b5a33e416085e647ff06548 not found: ID does not exist" containerID="3d93bf10d2aeee847d6fc618924072ac5e40d0d01b5a33e416085e647ff06548" Nov 25 10:54:50 crc kubenswrapper[4769]: I1125 10:54:50.422956 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3d93bf10d2aeee847d6fc618924072ac5e40d0d01b5a33e416085e647ff06548"} err="failed to get container status \"3d93bf10d2aeee847d6fc618924072ac5e40d0d01b5a33e416085e647ff06548\": rpc error: code = NotFound desc = could not find container \"3d93bf10d2aeee847d6fc618924072ac5e40d0d01b5a33e416085e647ff06548\": container with ID starting with 3d93bf10d2aeee847d6fc618924072ac5e40d0d01b5a33e416085e647ff06548 not found: ID does not exist" Nov 25 10:54:52 crc kubenswrapper[4769]: I1125 10:54:52.252616 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2fd904e3-ad5c-4b23-8a7d-52785a3848c7" path="/var/lib/kubelet/pods/2fd904e3-ad5c-4b23-8a7d-52785a3848c7/volumes" Nov 25 10:54:53 crc kubenswrapper[4769]: I1125 10:54:53.194720 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-6szml"] Nov 25 10:54:53 crc kubenswrapper[4769]: E1125 10:54:53.195684 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fd904e3-ad5c-4b23-8a7d-52785a3848c7" containerName="extract-content" Nov 25 10:54:53 crc kubenswrapper[4769]: I1125 10:54:53.195707 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fd904e3-ad5c-4b23-8a7d-52785a3848c7" containerName="extract-content" Nov 25 10:54:53 crc kubenswrapper[4769]: E1125 10:54:53.195755 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fd904e3-ad5c-4b23-8a7d-52785a3848c7" containerName="extract-utilities" Nov 25 10:54:53 crc kubenswrapper[4769]: I1125 10:54:53.195764 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fd904e3-ad5c-4b23-8a7d-52785a3848c7" containerName="extract-utilities" Nov 25 10:54:53 crc kubenswrapper[4769]: E1125 10:54:53.195787 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fd904e3-ad5c-4b23-8a7d-52785a3848c7" containerName="registry-server" Nov 25 10:54:53 crc kubenswrapper[4769]: I1125 10:54:53.195792 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fd904e3-ad5c-4b23-8a7d-52785a3848c7" containerName="registry-server" Nov 25 10:54:53 crc kubenswrapper[4769]: I1125 10:54:53.196024 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fd904e3-ad5c-4b23-8a7d-52785a3848c7" 
containerName="registry-server" Nov 25 10:54:53 crc kubenswrapper[4769]: I1125 10:54:53.197684 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6szml" Nov 25 10:54:53 crc kubenswrapper[4769]: I1125 10:54:53.213779 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6szml"] Nov 25 10:54:53 crc kubenswrapper[4769]: I1125 10:54:53.228438 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/710c715d-f8e7-45e7-8884-61f2ccdcb764-catalog-content\") pod \"redhat-marketplace-6szml\" (UID: \"710c715d-f8e7-45e7-8884-61f2ccdcb764\") " pod="openshift-marketplace/redhat-marketplace-6szml" Nov 25 10:54:53 crc kubenswrapper[4769]: I1125 10:54:53.228530 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d49wl\" (UniqueName: \"kubernetes.io/projected/710c715d-f8e7-45e7-8884-61f2ccdcb764-kube-api-access-d49wl\") pod \"redhat-marketplace-6szml\" (UID: \"710c715d-f8e7-45e7-8884-61f2ccdcb764\") " pod="openshift-marketplace/redhat-marketplace-6szml" Nov 25 10:54:53 crc kubenswrapper[4769]: I1125 10:54:53.228714 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/710c715d-f8e7-45e7-8884-61f2ccdcb764-utilities\") pod \"redhat-marketplace-6szml\" (UID: \"710c715d-f8e7-45e7-8884-61f2ccdcb764\") " pod="openshift-marketplace/redhat-marketplace-6szml" Nov 25 10:54:53 crc kubenswrapper[4769]: I1125 10:54:53.332340 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/710c715d-f8e7-45e7-8884-61f2ccdcb764-utilities\") pod \"redhat-marketplace-6szml\" (UID: \"710c715d-f8e7-45e7-8884-61f2ccdcb764\") " pod="openshift-marketplace/redhat-marketplace-6szml" Nov 25 10:54:53 crc kubenswrapper[4769]: I1125 10:54:53.332421 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/710c715d-f8e7-45e7-8884-61f2ccdcb764-catalog-content\") pod \"redhat-marketplace-6szml\" (UID: \"710c715d-f8e7-45e7-8884-61f2ccdcb764\") " pod="openshift-marketplace/redhat-marketplace-6szml" Nov 25 10:54:53 crc kubenswrapper[4769]: I1125 10:54:53.332497 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/710c715d-f8e7-45e7-8884-61f2ccdcb764-utilities\") pod \"redhat-marketplace-6szml\" (UID: \"710c715d-f8e7-45e7-8884-61f2ccdcb764\") " pod="openshift-marketplace/redhat-marketplace-6szml" Nov 25 10:54:53 crc kubenswrapper[4769]: I1125 10:54:53.332746 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/710c715d-f8e7-45e7-8884-61f2ccdcb764-catalog-content\") pod \"redhat-marketplace-6szml\" (UID: \"710c715d-f8e7-45e7-8884-61f2ccdcb764\") " pod="openshift-marketplace/redhat-marketplace-6szml" Nov 25 10:54:53 crc kubenswrapper[4769]: I1125 10:54:53.332810 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d49wl\" (UniqueName: \"kubernetes.io/projected/710c715d-f8e7-45e7-8884-61f2ccdcb764-kube-api-access-d49wl\") pod \"redhat-marketplace-6szml\" (UID: \"710c715d-f8e7-45e7-8884-61f2ccdcb764\") " 
pod="openshift-marketplace/redhat-marketplace-6szml" Nov 25 10:54:53 crc kubenswrapper[4769]: I1125 10:54:53.354700 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d49wl\" (UniqueName: \"kubernetes.io/projected/710c715d-f8e7-45e7-8884-61f2ccdcb764-kube-api-access-d49wl\") pod \"redhat-marketplace-6szml\" (UID: \"710c715d-f8e7-45e7-8884-61f2ccdcb764\") " pod="openshift-marketplace/redhat-marketplace-6szml" Nov 25 10:54:53 crc kubenswrapper[4769]: I1125 10:54:53.524783 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6szml" Nov 25 10:54:54 crc kubenswrapper[4769]: I1125 10:54:54.045783 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6szml"] Nov 25 10:54:54 crc kubenswrapper[4769]: I1125 10:54:54.340027 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6szml" event={"ID":"710c715d-f8e7-45e7-8884-61f2ccdcb764","Type":"ContainerStarted","Data":"1e6001fa98e5e815a64313426bdd760025b474b1953e037e87734b7f9725992e"} Nov 25 10:54:54 crc kubenswrapper[4769]: I1125 10:54:54.340075 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6szml" event={"ID":"710c715d-f8e7-45e7-8884-61f2ccdcb764","Type":"ContainerStarted","Data":"c5bbdff6cf3632f51e377bb85f98ad12399cdc73d60da5f9d505fbb21af7ecb2"} Nov 25 10:54:55 crc kubenswrapper[4769]: I1125 10:54:55.352551 4769 generic.go:334] "Generic (PLEG): container finished" podID="710c715d-f8e7-45e7-8884-61f2ccdcb764" containerID="1e6001fa98e5e815a64313426bdd760025b474b1953e037e87734b7f9725992e" exitCode=0 Nov 25 10:54:55 crc kubenswrapper[4769]: I1125 10:54:55.352910 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6szml" event={"ID":"710c715d-f8e7-45e7-8884-61f2ccdcb764","Type":"ContainerDied","Data":"1e6001fa98e5e815a64313426bdd760025b474b1953e037e87734b7f9725992e"} Nov 25 10:54:57 crc kubenswrapper[4769]: I1125 10:54:57.375471 4769 generic.go:334] "Generic (PLEG): container finished" podID="710c715d-f8e7-45e7-8884-61f2ccdcb764" containerID="6ac712844232cbc9c8201e2904798f60cbfbca5ca43a499e5479d9d83fdc03d5" exitCode=0 Nov 25 10:54:57 crc kubenswrapper[4769]: I1125 10:54:57.375518 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6szml" event={"ID":"710c715d-f8e7-45e7-8884-61f2ccdcb764","Type":"ContainerDied","Data":"6ac712844232cbc9c8201e2904798f60cbfbca5ca43a499e5479d9d83fdc03d5"} Nov 25 10:55:01 crc kubenswrapper[4769]: I1125 10:55:01.443443 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6szml" event={"ID":"710c715d-f8e7-45e7-8884-61f2ccdcb764","Type":"ContainerStarted","Data":"cef3a24ce648a537a7c7c3a7429ab66aa649a07fecaf0054776a7d7b92c41d76"} Nov 25 10:55:01 crc kubenswrapper[4769]: I1125 10:55:01.466224 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-6szml" podStartSLOduration=3.46440564 podStartE2EDuration="8.466204489s" podCreationTimestamp="2025-11-25 10:54:53 +0000 UTC" firstStartedPulling="2025-11-25 10:54:55.3560716 +0000 UTC m=+4243.941043933" lastFinishedPulling="2025-11-25 10:55:00.357870429 +0000 UTC m=+4248.942842782" observedRunningTime="2025-11-25 10:55:01.458627849 +0000 UTC m=+4250.043600162" watchObservedRunningTime="2025-11-25 10:55:01.466204489 +0000 UTC 
m=+4250.051176802" Nov 25 10:55:03 crc kubenswrapper[4769]: I1125 10:55:03.525606 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-6szml" Nov 25 10:55:03 crc kubenswrapper[4769]: I1125 10:55:03.529164 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-6szml" Nov 25 10:55:03 crc kubenswrapper[4769]: I1125 10:55:03.580488 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-6szml" Nov 25 10:55:05 crc kubenswrapper[4769]: I1125 10:55:05.585090 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-6szml" Nov 25 10:55:05 crc kubenswrapper[4769]: I1125 10:55:05.644854 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6szml"] Nov 25 10:55:07 crc kubenswrapper[4769]: I1125 10:55:07.540242 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-6szml" podUID="710c715d-f8e7-45e7-8884-61f2ccdcb764" containerName="registry-server" containerID="cri-o://cef3a24ce648a537a7c7c3a7429ab66aa649a07fecaf0054776a7d7b92c41d76" gracePeriod=2 Nov 25 10:55:08 crc kubenswrapper[4769]: I1125 10:55:08.559490 4769 generic.go:334] "Generic (PLEG): container finished" podID="710c715d-f8e7-45e7-8884-61f2ccdcb764" containerID="cef3a24ce648a537a7c7c3a7429ab66aa649a07fecaf0054776a7d7b92c41d76" exitCode=0 Nov 25 10:55:08 crc kubenswrapper[4769]: I1125 10:55:08.559594 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6szml" event={"ID":"710c715d-f8e7-45e7-8884-61f2ccdcb764","Type":"ContainerDied","Data":"cef3a24ce648a537a7c7c3a7429ab66aa649a07fecaf0054776a7d7b92c41d76"} Nov 25 10:55:08 crc kubenswrapper[4769]: I1125 10:55:08.921420 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6szml" Nov 25 10:55:09 crc kubenswrapper[4769]: I1125 10:55:09.056489 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/710c715d-f8e7-45e7-8884-61f2ccdcb764-utilities\") pod \"710c715d-f8e7-45e7-8884-61f2ccdcb764\" (UID: \"710c715d-f8e7-45e7-8884-61f2ccdcb764\") " Nov 25 10:55:09 crc kubenswrapper[4769]: I1125 10:55:09.056616 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/710c715d-f8e7-45e7-8884-61f2ccdcb764-catalog-content\") pod \"710c715d-f8e7-45e7-8884-61f2ccdcb764\" (UID: \"710c715d-f8e7-45e7-8884-61f2ccdcb764\") " Nov 25 10:55:09 crc kubenswrapper[4769]: I1125 10:55:09.056710 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d49wl\" (UniqueName: \"kubernetes.io/projected/710c715d-f8e7-45e7-8884-61f2ccdcb764-kube-api-access-d49wl\") pod \"710c715d-f8e7-45e7-8884-61f2ccdcb764\" (UID: \"710c715d-f8e7-45e7-8884-61f2ccdcb764\") " Nov 25 10:55:09 crc kubenswrapper[4769]: I1125 10:55:09.057875 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/710c715d-f8e7-45e7-8884-61f2ccdcb764-utilities" (OuterVolumeSpecName: "utilities") pod "710c715d-f8e7-45e7-8884-61f2ccdcb764" (UID: "710c715d-f8e7-45e7-8884-61f2ccdcb764"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:55:09 crc kubenswrapper[4769]: I1125 10:55:09.094558 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/710c715d-f8e7-45e7-8884-61f2ccdcb764-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "710c715d-f8e7-45e7-8884-61f2ccdcb764" (UID: "710c715d-f8e7-45e7-8884-61f2ccdcb764"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:55:09 crc kubenswrapper[4769]: I1125 10:55:09.160373 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/710c715d-f8e7-45e7-8884-61f2ccdcb764-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:55:09 crc kubenswrapper[4769]: I1125 10:55:09.160407 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/710c715d-f8e7-45e7-8884-61f2ccdcb764-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:55:09 crc kubenswrapper[4769]: I1125 10:55:09.581614 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6szml" event={"ID":"710c715d-f8e7-45e7-8884-61f2ccdcb764","Type":"ContainerDied","Data":"c5bbdff6cf3632f51e377bb85f98ad12399cdc73d60da5f9d505fbb21af7ecb2"} Nov 25 10:55:09 crc kubenswrapper[4769]: I1125 10:55:09.581686 4769 scope.go:117] "RemoveContainer" containerID="cef3a24ce648a537a7c7c3a7429ab66aa649a07fecaf0054776a7d7b92c41d76" Nov 25 10:55:09 crc kubenswrapper[4769]: I1125 10:55:09.581756 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6szml" Nov 25 10:55:09 crc kubenswrapper[4769]: I1125 10:55:09.689113 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/710c715d-f8e7-45e7-8884-61f2ccdcb764-kube-api-access-d49wl" (OuterVolumeSpecName: "kube-api-access-d49wl") pod "710c715d-f8e7-45e7-8884-61f2ccdcb764" (UID: "710c715d-f8e7-45e7-8884-61f2ccdcb764"). InnerVolumeSpecName "kube-api-access-d49wl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:55:09 crc kubenswrapper[4769]: I1125 10:55:09.776144 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d49wl\" (UniqueName: \"kubernetes.io/projected/710c715d-f8e7-45e7-8884-61f2ccdcb764-kube-api-access-d49wl\") on node \"crc\" DevicePath \"\"" Nov 25 10:55:09 crc kubenswrapper[4769]: I1125 10:55:09.782003 4769 scope.go:117] "RemoveContainer" containerID="6ac712844232cbc9c8201e2904798f60cbfbca5ca43a499e5479d9d83fdc03d5" Nov 25 10:55:09 crc kubenswrapper[4769]: I1125 10:55:09.904360 4769 scope.go:117] "RemoveContainer" containerID="1e6001fa98e5e815a64313426bdd760025b474b1953e037e87734b7f9725992e" Nov 25 10:55:09 crc kubenswrapper[4769]: I1125 10:55:09.926791 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6szml"] Nov 25 10:55:09 crc kubenswrapper[4769]: I1125 10:55:09.942803 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-6szml"] Nov 25 10:55:10 crc kubenswrapper[4769]: I1125 10:55:10.256848 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="710c715d-f8e7-45e7-8884-61f2ccdcb764" path="/var/lib/kubelet/pods/710c715d-f8e7-45e7-8884-61f2ccdcb764/volumes" Nov 25 10:56:22 crc kubenswrapper[4769]: I1125 10:56:22.293841 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:56:22 crc kubenswrapper[4769]: I1125 10:56:22.294547 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:56:52 crc kubenswrapper[4769]: I1125 10:56:52.290326 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:56:52 crc kubenswrapper[4769]: I1125 10:56:52.290849 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:57:22 crc kubenswrapper[4769]: I1125 10:57:22.290285 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:57:22 crc kubenswrapper[4769]: I1125 10:57:22.292049 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:57:22 crc 
kubenswrapper[4769]: I1125 10:57:22.292208 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" Nov 25 10:57:22 crc kubenswrapper[4769]: I1125 10:57:22.293273 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"319debd1d2820bc9978952132ea229b634e7709f812ece9fe2a1e10c727e5d05"} pod="openshift-machine-config-operator/machine-config-daemon-98mzt" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 10:57:22 crc kubenswrapper[4769]: I1125 10:57:22.293434 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" containerID="cri-o://319debd1d2820bc9978952132ea229b634e7709f812ece9fe2a1e10c727e5d05" gracePeriod=600 Nov 25 10:57:22 crc kubenswrapper[4769]: E1125 10:57:22.418522 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:57:23 crc kubenswrapper[4769]: I1125 10:57:23.347351 4769 generic.go:334] "Generic (PLEG): container finished" podID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerID="319debd1d2820bc9978952132ea229b634e7709f812ece9fe2a1e10c727e5d05" exitCode=0 Nov 25 10:57:23 crc kubenswrapper[4769]: I1125 10:57:23.347730 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" event={"ID":"d58c71b5-5dc4-45c1-9b58-9740a35d2256","Type":"ContainerDied","Data":"319debd1d2820bc9978952132ea229b634e7709f812ece9fe2a1e10c727e5d05"} Nov 25 10:57:23 crc kubenswrapper[4769]: I1125 10:57:23.347777 4769 scope.go:117] "RemoveContainer" containerID="6ee7bc9266cc374f05539fb2582761256290e07c7dda15eb655fce905ed00be0" Nov 25 10:57:23 crc kubenswrapper[4769]: I1125 10:57:23.348825 4769 scope.go:117] "RemoveContainer" containerID="319debd1d2820bc9978952132ea229b634e7709f812ece9fe2a1e10c727e5d05" Nov 25 10:57:23 crc kubenswrapper[4769]: E1125 10:57:23.349303 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:57:37 crc kubenswrapper[4769]: I1125 10:57:37.238690 4769 scope.go:117] "RemoveContainer" containerID="319debd1d2820bc9978952132ea229b634e7709f812ece9fe2a1e10c727e5d05" Nov 25 10:57:37 crc kubenswrapper[4769]: E1125 10:57:37.239571 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:57:49 crc kubenswrapper[4769]: I1125 10:57:49.237862 4769 scope.go:117] "RemoveContainer" containerID="319debd1d2820bc9978952132ea229b634e7709f812ece9fe2a1e10c727e5d05" Nov 25 10:57:49 crc kubenswrapper[4769]: E1125 10:57:49.238786 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:57:56 crc kubenswrapper[4769]: I1125 10:57:56.707926 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-rxxxf"] Nov 25 10:57:56 crc kubenswrapper[4769]: E1125 10:57:56.709675 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="710c715d-f8e7-45e7-8884-61f2ccdcb764" containerName="registry-server" Nov 25 10:57:56 crc kubenswrapper[4769]: I1125 10:57:56.709708 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="710c715d-f8e7-45e7-8884-61f2ccdcb764" containerName="registry-server" Nov 25 10:57:56 crc kubenswrapper[4769]: E1125 10:57:56.709754 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="710c715d-f8e7-45e7-8884-61f2ccdcb764" containerName="extract-utilities" Nov 25 10:57:56 crc kubenswrapper[4769]: I1125 10:57:56.709772 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="710c715d-f8e7-45e7-8884-61f2ccdcb764" containerName="extract-utilities" Nov 25 10:57:56 crc kubenswrapper[4769]: E1125 10:57:56.709835 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="710c715d-f8e7-45e7-8884-61f2ccdcb764" containerName="extract-content" Nov 25 10:57:56 crc kubenswrapper[4769]: I1125 10:57:56.709845 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="710c715d-f8e7-45e7-8884-61f2ccdcb764" containerName="extract-content" Nov 25 10:57:56 crc kubenswrapper[4769]: I1125 10:57:56.710520 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="710c715d-f8e7-45e7-8884-61f2ccdcb764" containerName="registry-server" Nov 25 10:57:56 crc kubenswrapper[4769]: I1125 10:57:56.722835 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-rxxxf" Nov 25 10:57:56 crc kubenswrapper[4769]: I1125 10:57:56.785606 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rxxxf"] Nov 25 10:57:56 crc kubenswrapper[4769]: I1125 10:57:56.810756 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f1cb2e5-4ac1-405c-9fd5-9274daa916d9-catalog-content\") pod \"redhat-operators-rxxxf\" (UID: \"8f1cb2e5-4ac1-405c-9fd5-9274daa916d9\") " pod="openshift-marketplace/redhat-operators-rxxxf" Nov 25 10:57:56 crc kubenswrapper[4769]: I1125 10:57:56.810883 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9tqpd\" (UniqueName: \"kubernetes.io/projected/8f1cb2e5-4ac1-405c-9fd5-9274daa916d9-kube-api-access-9tqpd\") pod \"redhat-operators-rxxxf\" (UID: \"8f1cb2e5-4ac1-405c-9fd5-9274daa916d9\") " pod="openshift-marketplace/redhat-operators-rxxxf" Nov 25 10:57:56 crc kubenswrapper[4769]: I1125 10:57:56.810946 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f1cb2e5-4ac1-405c-9fd5-9274daa916d9-utilities\") pod \"redhat-operators-rxxxf\" (UID: \"8f1cb2e5-4ac1-405c-9fd5-9274daa916d9\") " pod="openshift-marketplace/redhat-operators-rxxxf" Nov 25 10:57:56 crc kubenswrapper[4769]: I1125 10:57:56.913657 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f1cb2e5-4ac1-405c-9fd5-9274daa916d9-catalog-content\") pod \"redhat-operators-rxxxf\" (UID: \"8f1cb2e5-4ac1-405c-9fd5-9274daa916d9\") " pod="openshift-marketplace/redhat-operators-rxxxf" Nov 25 10:57:56 crc kubenswrapper[4769]: I1125 10:57:56.914072 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9tqpd\" (UniqueName: \"kubernetes.io/projected/8f1cb2e5-4ac1-405c-9fd5-9274daa916d9-kube-api-access-9tqpd\") pod \"redhat-operators-rxxxf\" (UID: \"8f1cb2e5-4ac1-405c-9fd5-9274daa916d9\") " pod="openshift-marketplace/redhat-operators-rxxxf" Nov 25 10:57:56 crc kubenswrapper[4769]: I1125 10:57:56.914228 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f1cb2e5-4ac1-405c-9fd5-9274daa916d9-utilities\") pod \"redhat-operators-rxxxf\" (UID: \"8f1cb2e5-4ac1-405c-9fd5-9274daa916d9\") " pod="openshift-marketplace/redhat-operators-rxxxf" Nov 25 10:57:56 crc kubenswrapper[4769]: I1125 10:57:56.914284 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f1cb2e5-4ac1-405c-9fd5-9274daa916d9-catalog-content\") pod \"redhat-operators-rxxxf\" (UID: \"8f1cb2e5-4ac1-405c-9fd5-9274daa916d9\") " pod="openshift-marketplace/redhat-operators-rxxxf" Nov 25 10:57:56 crc kubenswrapper[4769]: I1125 10:57:56.914630 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f1cb2e5-4ac1-405c-9fd5-9274daa916d9-utilities\") pod \"redhat-operators-rxxxf\" (UID: \"8f1cb2e5-4ac1-405c-9fd5-9274daa916d9\") " pod="openshift-marketplace/redhat-operators-rxxxf" Nov 25 10:57:56 crc kubenswrapper[4769]: I1125 10:57:56.941212 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-9tqpd\" (UniqueName: \"kubernetes.io/projected/8f1cb2e5-4ac1-405c-9fd5-9274daa916d9-kube-api-access-9tqpd\") pod \"redhat-operators-rxxxf\" (UID: \"8f1cb2e5-4ac1-405c-9fd5-9274daa916d9\") " pod="openshift-marketplace/redhat-operators-rxxxf" Nov 25 10:57:57 crc kubenswrapper[4769]: I1125 10:57:57.109003 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rxxxf" Nov 25 10:57:57 crc kubenswrapper[4769]: I1125 10:57:57.616788 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rxxxf"] Nov 25 10:57:58 crc kubenswrapper[4769]: I1125 10:57:58.164947 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rxxxf" event={"ID":"8f1cb2e5-4ac1-405c-9fd5-9274daa916d9","Type":"ContainerStarted","Data":"e47fe1eca62e16d9b4e0d118049ed48e226c3e56113cea3e20cfdec8cfd18898"} Nov 25 10:57:59 crc kubenswrapper[4769]: I1125 10:57:59.185250 4769 generic.go:334] "Generic (PLEG): container finished" podID="8f1cb2e5-4ac1-405c-9fd5-9274daa916d9" containerID="8bd7b8dbdcced012fe3b2c3180159d6862c6c286dde0f7e5d5e38d5dfe3992d1" exitCode=0 Nov 25 10:57:59 crc kubenswrapper[4769]: I1125 10:57:59.185703 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rxxxf" event={"ID":"8f1cb2e5-4ac1-405c-9fd5-9274daa916d9","Type":"ContainerDied","Data":"8bd7b8dbdcced012fe3b2c3180159d6862c6c286dde0f7e5d5e38d5dfe3992d1"} Nov 25 10:57:59 crc kubenswrapper[4769]: I1125 10:57:59.189339 4769 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 10:58:00 crc kubenswrapper[4769]: I1125 10:58:00.243282 4769 scope.go:117] "RemoveContainer" containerID="319debd1d2820bc9978952132ea229b634e7709f812ece9fe2a1e10c727e5d05" Nov 25 10:58:00 crc kubenswrapper[4769]: E1125 10:58:00.243994 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:58:01 crc kubenswrapper[4769]: I1125 10:58:01.222241 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rxxxf" event={"ID":"8f1cb2e5-4ac1-405c-9fd5-9274daa916d9","Type":"ContainerStarted","Data":"074d655f2e838d691282831da498e97c76dcaf3f2672576f11f92ffbd7a00c98"} Nov 25 10:58:06 crc kubenswrapper[4769]: I1125 10:58:06.314111 4769 generic.go:334] "Generic (PLEG): container finished" podID="8f1cb2e5-4ac1-405c-9fd5-9274daa916d9" containerID="074d655f2e838d691282831da498e97c76dcaf3f2672576f11f92ffbd7a00c98" exitCode=0 Nov 25 10:58:06 crc kubenswrapper[4769]: I1125 10:58:06.314219 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rxxxf" event={"ID":"8f1cb2e5-4ac1-405c-9fd5-9274daa916d9","Type":"ContainerDied","Data":"074d655f2e838d691282831da498e97c76dcaf3f2672576f11f92ffbd7a00c98"} Nov 25 10:58:08 crc kubenswrapper[4769]: I1125 10:58:08.341231 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rxxxf" 
event={"ID":"8f1cb2e5-4ac1-405c-9fd5-9274daa916d9","Type":"ContainerStarted","Data":"cfd301b2b1f4857ebce406e2ff02dbd1eb156ce1fb1cca2df4e7d12cc705e5ee"} Nov 25 10:58:08 crc kubenswrapper[4769]: I1125 10:58:08.370190 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-rxxxf" podStartSLOduration=4.780370073 podStartE2EDuration="12.37016927s" podCreationTimestamp="2025-11-25 10:57:56 +0000 UTC" firstStartedPulling="2025-11-25 10:57:59.188738908 +0000 UTC m=+4427.773711271" lastFinishedPulling="2025-11-25 10:58:06.778538155 +0000 UTC m=+4435.363510468" observedRunningTime="2025-11-25 10:58:08.35578327 +0000 UTC m=+4436.940755593" watchObservedRunningTime="2025-11-25 10:58:08.37016927 +0000 UTC m=+4436.955141583" Nov 25 10:58:14 crc kubenswrapper[4769]: I1125 10:58:14.238233 4769 scope.go:117] "RemoveContainer" containerID="319debd1d2820bc9978952132ea229b634e7709f812ece9fe2a1e10c727e5d05" Nov 25 10:58:14 crc kubenswrapper[4769]: E1125 10:58:14.239384 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:58:17 crc kubenswrapper[4769]: I1125 10:58:17.109226 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-rxxxf" Nov 25 10:58:17 crc kubenswrapper[4769]: I1125 10:58:17.111457 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-rxxxf" Nov 25 10:58:18 crc kubenswrapper[4769]: I1125 10:58:18.174854 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-rxxxf" podUID="8f1cb2e5-4ac1-405c-9fd5-9274daa916d9" containerName="registry-server" probeResult="failure" output=< Nov 25 10:58:18 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 10:58:18 crc kubenswrapper[4769]: > Nov 25 10:58:27 crc kubenswrapper[4769]: I1125 10:58:27.236938 4769 scope.go:117] "RemoveContainer" containerID="319debd1d2820bc9978952132ea229b634e7709f812ece9fe2a1e10c727e5d05" Nov 25 10:58:27 crc kubenswrapper[4769]: E1125 10:58:27.237685 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:58:28 crc kubenswrapper[4769]: I1125 10:58:28.157472 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-rxxxf" podUID="8f1cb2e5-4ac1-405c-9fd5-9274daa916d9" containerName="registry-server" probeResult="failure" output=< Nov 25 10:58:28 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 10:58:28 crc kubenswrapper[4769]: > Nov 25 10:58:37 crc kubenswrapper[4769]: I1125 10:58:37.173359 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-rxxxf" Nov 25 10:58:37 crc 
Nov 25 10:58:37 crc kubenswrapper[4769]: I1125 10:58:37.234516 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-rxxxf"
Nov 25 10:58:37 crc kubenswrapper[4769]: I1125 10:58:37.422699 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rxxxf"]
Nov 25 10:58:38 crc kubenswrapper[4769]: I1125 10:58:38.690876 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-rxxxf" podUID="8f1cb2e5-4ac1-405c-9fd5-9274daa916d9" containerName="registry-server" containerID="cri-o://cfd301b2b1f4857ebce406e2ff02dbd1eb156ce1fb1cca2df4e7d12cc705e5ee" gracePeriod=2
Nov 25 10:58:39 crc kubenswrapper[4769]: I1125 10:58:39.332663 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rxxxf"
Nov 25 10:58:39 crc kubenswrapper[4769]: I1125 10:58:39.442321 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f1cb2e5-4ac1-405c-9fd5-9274daa916d9-catalog-content\") pod \"8f1cb2e5-4ac1-405c-9fd5-9274daa916d9\" (UID: \"8f1cb2e5-4ac1-405c-9fd5-9274daa916d9\") "
Nov 25 10:58:39 crc kubenswrapper[4769]: I1125 10:58:39.443022 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9tqpd\" (UniqueName: \"kubernetes.io/projected/8f1cb2e5-4ac1-405c-9fd5-9274daa916d9-kube-api-access-9tqpd\") pod \"8f1cb2e5-4ac1-405c-9fd5-9274daa916d9\" (UID: \"8f1cb2e5-4ac1-405c-9fd5-9274daa916d9\") "
Nov 25 10:58:39 crc kubenswrapper[4769]: I1125 10:58:39.443188 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f1cb2e5-4ac1-405c-9fd5-9274daa916d9-utilities\") pod \"8f1cb2e5-4ac1-405c-9fd5-9274daa916d9\" (UID: \"8f1cb2e5-4ac1-405c-9fd5-9274daa916d9\") "
Nov 25 10:58:39 crc kubenswrapper[4769]: I1125 10:58:39.443706 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f1cb2e5-4ac1-405c-9fd5-9274daa916d9-utilities" (OuterVolumeSpecName: "utilities") pod "8f1cb2e5-4ac1-405c-9fd5-9274daa916d9" (UID: "8f1cb2e5-4ac1-405c-9fd5-9274daa916d9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 10:58:39 crc kubenswrapper[4769]: I1125 10:58:39.444201 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f1cb2e5-4ac1-405c-9fd5-9274daa916d9-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 10:58:39 crc kubenswrapper[4769]: I1125 10:58:39.451566 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f1cb2e5-4ac1-405c-9fd5-9274daa916d9-kube-api-access-9tqpd" (OuterVolumeSpecName: "kube-api-access-9tqpd") pod "8f1cb2e5-4ac1-405c-9fd5-9274daa916d9" (UID: "8f1cb2e5-4ac1-405c-9fd5-9274daa916d9"). InnerVolumeSpecName "kube-api-access-9tqpd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:58:39 crc kubenswrapper[4769]: I1125 10:58:39.546716 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9tqpd\" (UniqueName: \"kubernetes.io/projected/8f1cb2e5-4ac1-405c-9fd5-9274daa916d9-kube-api-access-9tqpd\") on node \"crc\" DevicePath \"\""
Nov 25 10:58:39 crc kubenswrapper[4769]: I1125 10:58:39.559225 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f1cb2e5-4ac1-405c-9fd5-9274daa916d9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8f1cb2e5-4ac1-405c-9fd5-9274daa916d9" (UID: "8f1cb2e5-4ac1-405c-9fd5-9274daa916d9"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 10:58:39 crc kubenswrapper[4769]: I1125 10:58:39.648469 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f1cb2e5-4ac1-405c-9fd5-9274daa916d9-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 10:58:39 crc kubenswrapper[4769]: I1125 10:58:39.702979 4769 generic.go:334] "Generic (PLEG): container finished" podID="8f1cb2e5-4ac1-405c-9fd5-9274daa916d9" containerID="cfd301b2b1f4857ebce406e2ff02dbd1eb156ce1fb1cca2df4e7d12cc705e5ee" exitCode=0
Nov 25 10:58:39 crc kubenswrapper[4769]: I1125 10:58:39.703003 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rxxxf" event={"ID":"8f1cb2e5-4ac1-405c-9fd5-9274daa916d9","Type":"ContainerDied","Data":"cfd301b2b1f4857ebce406e2ff02dbd1eb156ce1fb1cca2df4e7d12cc705e5ee"}
Nov 25 10:58:39 crc kubenswrapper[4769]: I1125 10:58:39.703057 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rxxxf" event={"ID":"8f1cb2e5-4ac1-405c-9fd5-9274daa916d9","Type":"ContainerDied","Data":"e47fe1eca62e16d9b4e0d118049ed48e226c3e56113cea3e20cfdec8cfd18898"}
Nov 25 10:58:39 crc kubenswrapper[4769]: I1125 10:58:39.703081 4769 scope.go:117] "RemoveContainer" containerID="cfd301b2b1f4857ebce406e2ff02dbd1eb156ce1fb1cca2df4e7d12cc705e5ee"
Nov 25 10:58:39 crc kubenswrapper[4769]: I1125 10:58:39.704186 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rxxxf"
Nov 25 10:58:39 crc kubenswrapper[4769]: I1125 10:58:39.745187 4769 scope.go:117] "RemoveContainer" containerID="074d655f2e838d691282831da498e97c76dcaf3f2672576f11f92ffbd7a00c98"
Nov 25 10:58:39 crc kubenswrapper[4769]: I1125 10:58:39.814938 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rxxxf"]
Nov 25 10:58:39 crc kubenswrapper[4769]: I1125 10:58:39.827328 4769 scope.go:117] "RemoveContainer" containerID="8bd7b8dbdcced012fe3b2c3180159d6862c6c286dde0f7e5d5e38d5dfe3992d1"
Nov 25 10:58:39 crc kubenswrapper[4769]: I1125 10:58:39.834431 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-rxxxf"]
Nov 25 10:58:39 crc kubenswrapper[4769]: I1125 10:58:39.849302 4769 scope.go:117] "RemoveContainer" containerID="cfd301b2b1f4857ebce406e2ff02dbd1eb156ce1fb1cca2df4e7d12cc705e5ee"
Nov 25 10:58:39 crc kubenswrapper[4769]: E1125 10:58:39.849680 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cfd301b2b1f4857ebce406e2ff02dbd1eb156ce1fb1cca2df4e7d12cc705e5ee\": container with ID starting with cfd301b2b1f4857ebce406e2ff02dbd1eb156ce1fb1cca2df4e7d12cc705e5ee not found: ID does not exist" containerID="cfd301b2b1f4857ebce406e2ff02dbd1eb156ce1fb1cca2df4e7d12cc705e5ee"
Nov 25 10:58:39 crc kubenswrapper[4769]: I1125 10:58:39.849710 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cfd301b2b1f4857ebce406e2ff02dbd1eb156ce1fb1cca2df4e7d12cc705e5ee"} err="failed to get container status \"cfd301b2b1f4857ebce406e2ff02dbd1eb156ce1fb1cca2df4e7d12cc705e5ee\": rpc error: code = NotFound desc = could not find container \"cfd301b2b1f4857ebce406e2ff02dbd1eb156ce1fb1cca2df4e7d12cc705e5ee\": container with ID starting with cfd301b2b1f4857ebce406e2ff02dbd1eb156ce1fb1cca2df4e7d12cc705e5ee not found: ID does not exist"
Nov 25 10:58:39 crc kubenswrapper[4769]: I1125 10:58:39.849731 4769 scope.go:117] "RemoveContainer" containerID="074d655f2e838d691282831da498e97c76dcaf3f2672576f11f92ffbd7a00c98"
Nov 25 10:58:39 crc kubenswrapper[4769]: E1125 10:58:39.850049 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"074d655f2e838d691282831da498e97c76dcaf3f2672576f11f92ffbd7a00c98\": container with ID starting with 074d655f2e838d691282831da498e97c76dcaf3f2672576f11f92ffbd7a00c98 not found: ID does not exist" containerID="074d655f2e838d691282831da498e97c76dcaf3f2672576f11f92ffbd7a00c98"
Nov 25 10:58:39 crc kubenswrapper[4769]: I1125 10:58:39.850098 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"074d655f2e838d691282831da498e97c76dcaf3f2672576f11f92ffbd7a00c98"} err="failed to get container status \"074d655f2e838d691282831da498e97c76dcaf3f2672576f11f92ffbd7a00c98\": rpc error: code = NotFound desc = could not find container \"074d655f2e838d691282831da498e97c76dcaf3f2672576f11f92ffbd7a00c98\": container with ID starting with 074d655f2e838d691282831da498e97c76dcaf3f2672576f11f92ffbd7a00c98 not found: ID does not exist"
Nov 25 10:58:39 crc kubenswrapper[4769]: I1125 10:58:39.850124 4769 scope.go:117] "RemoveContainer" containerID="8bd7b8dbdcced012fe3b2c3180159d6862c6c286dde0f7e5d5e38d5dfe3992d1"
Nov 25 10:58:39 crc kubenswrapper[4769]: E1125 10:58:39.850398 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8bd7b8dbdcced012fe3b2c3180159d6862c6c286dde0f7e5d5e38d5dfe3992d1\": container with ID starting with 8bd7b8dbdcced012fe3b2c3180159d6862c6c286dde0f7e5d5e38d5dfe3992d1 not found: ID does not exist" containerID="8bd7b8dbdcced012fe3b2c3180159d6862c6c286dde0f7e5d5e38d5dfe3992d1"
Nov 25 10:58:39 crc kubenswrapper[4769]: I1125 10:58:39.850418 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8bd7b8dbdcced012fe3b2c3180159d6862c6c286dde0f7e5d5e38d5dfe3992d1"} err="failed to get container status \"8bd7b8dbdcced012fe3b2c3180159d6862c6c286dde0f7e5d5e38d5dfe3992d1\": rpc error: code = NotFound desc = could not find container \"8bd7b8dbdcced012fe3b2c3180159d6862c6c286dde0f7e5d5e38d5dfe3992d1\": container with ID starting with 8bd7b8dbdcced012fe3b2c3180159d6862c6c286dde0f7e5d5e38d5dfe3992d1 not found: ID does not exist"
Nov 25 10:58:40 crc kubenswrapper[4769]: I1125 10:58:40.237418 4769 scope.go:117] "RemoveContainer" containerID="319debd1d2820bc9978952132ea229b634e7709f812ece9fe2a1e10c727e5d05"
Nov 25 10:58:40 crc kubenswrapper[4769]: E1125 10:58:40.238250 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256"
Nov 25 10:58:40 crc kubenswrapper[4769]: I1125 10:58:40.254508 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f1cb2e5-4ac1-405c-9fd5-9274daa916d9" path="/var/lib/kubelet/pods/8f1cb2e5-4ac1-405c-9fd5-9274daa916d9/volumes"
Nov 25 10:58:53 crc kubenswrapper[4769]: I1125 10:58:53.237058 4769 scope.go:117] "RemoveContainer" containerID="319debd1d2820bc9978952132ea229b634e7709f812ece9fe2a1e10c727e5d05"
Nov 25 10:58:53 crc kubenswrapper[4769]: E1125 10:58:53.238590 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256"
Nov 25 10:59:04 crc kubenswrapper[4769]: I1125 10:59:04.237540 4769 scope.go:117] "RemoveContainer" containerID="319debd1d2820bc9978952132ea229b634e7709f812ece9fe2a1e10c727e5d05"
Nov 25 10:59:04 crc kubenswrapper[4769]: E1125 10:59:04.238423 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256"
Nov 25 10:59:15 crc kubenswrapper[4769]: I1125 10:59:15.238117 4769 scope.go:117] "RemoveContainer" containerID="319debd1d2820bc9978952132ea229b634e7709f812ece9fe2a1e10c727e5d05"
Nov 25 10:59:15 crc kubenswrapper[4769]: E1125 10:59:15.239031 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256"
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:59:26 crc kubenswrapper[4769]: I1125 10:59:26.237072 4769 scope.go:117] "RemoveContainer" containerID="319debd1d2820bc9978952132ea229b634e7709f812ece9fe2a1e10c727e5d05" Nov 25 10:59:26 crc kubenswrapper[4769]: E1125 10:59:26.239379 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:59:39 crc kubenswrapper[4769]: I1125 10:59:39.244489 4769 scope.go:117] "RemoveContainer" containerID="319debd1d2820bc9978952132ea229b634e7709f812ece9fe2a1e10c727e5d05" Nov 25 10:59:39 crc kubenswrapper[4769]: E1125 10:59:39.245319 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:59:51 crc kubenswrapper[4769]: I1125 10:59:51.237862 4769 scope.go:117] "RemoveContainer" containerID="319debd1d2820bc9978952132ea229b634e7709f812ece9fe2a1e10c727e5d05" Nov 25 10:59:51 crc kubenswrapper[4769]: E1125 10:59:51.239325 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 10:59:56 crc kubenswrapper[4769]: I1125 10:59:56.790856 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="eb894707-cfa1-4716-a991-31992d8cff88" containerName="ceilometer-central-agent" probeResult="failure" output="command timed out" Nov 25 10:59:58 crc kubenswrapper[4769]: I1125 10:59:58.455612 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="eb894707-cfa1-4716-a991-31992d8cff88" containerName="ceilometer-central-agent" probeResult="failure" output=< Nov 25 10:59:58 crc kubenswrapper[4769]: Unkown error: Expecting value: line 1 column 1 (char 0) Nov 25 10:59:58 crc kubenswrapper[4769]: > Nov 25 11:00:00 crc kubenswrapper[4769]: I1125 11:00:00.170562 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401140-xql6l"] Nov 25 11:00:00 crc kubenswrapper[4769]: E1125 11:00:00.171530 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f1cb2e5-4ac1-405c-9fd5-9274daa916d9" containerName="extract-content" Nov 25 11:00:00 crc kubenswrapper[4769]: I1125 11:00:00.171546 4769 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="8f1cb2e5-4ac1-405c-9fd5-9274daa916d9" containerName="extract-content" Nov 25 11:00:00 crc kubenswrapper[4769]: E1125 11:00:00.171585 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f1cb2e5-4ac1-405c-9fd5-9274daa916d9" containerName="registry-server" Nov 25 11:00:00 crc kubenswrapper[4769]: I1125 11:00:00.171593 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f1cb2e5-4ac1-405c-9fd5-9274daa916d9" containerName="registry-server" Nov 25 11:00:00 crc kubenswrapper[4769]: E1125 11:00:00.171632 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f1cb2e5-4ac1-405c-9fd5-9274daa916d9" containerName="extract-utilities" Nov 25 11:00:00 crc kubenswrapper[4769]: I1125 11:00:00.171641 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f1cb2e5-4ac1-405c-9fd5-9274daa916d9" containerName="extract-utilities" Nov 25 11:00:00 crc kubenswrapper[4769]: I1125 11:00:00.171940 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f1cb2e5-4ac1-405c-9fd5-9274daa916d9" containerName="registry-server" Nov 25 11:00:00 crc kubenswrapper[4769]: I1125 11:00:00.173089 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-xql6l" Nov 25 11:00:00 crc kubenswrapper[4769]: I1125 11:00:00.176170 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 11:00:00 crc kubenswrapper[4769]: I1125 11:00:00.176355 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 11:00:00 crc kubenswrapper[4769]: I1125 11:00:00.198837 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401140-xql6l"] Nov 25 11:00:00 crc kubenswrapper[4769]: I1125 11:00:00.209707 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3073507c-bfef-44e2-a58c-68212ea59a75-config-volume\") pod \"collect-profiles-29401140-xql6l\" (UID: \"3073507c-bfef-44e2-a58c-68212ea59a75\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-xql6l" Nov 25 11:00:00 crc kubenswrapper[4769]: I1125 11:00:00.209980 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3073507c-bfef-44e2-a58c-68212ea59a75-secret-volume\") pod \"collect-profiles-29401140-xql6l\" (UID: \"3073507c-bfef-44e2-a58c-68212ea59a75\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-xql6l" Nov 25 11:00:00 crc kubenswrapper[4769]: I1125 11:00:00.210087 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7cxw6\" (UniqueName: \"kubernetes.io/projected/3073507c-bfef-44e2-a58c-68212ea59a75-kube-api-access-7cxw6\") pod \"collect-profiles-29401140-xql6l\" (UID: \"3073507c-bfef-44e2-a58c-68212ea59a75\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-xql6l" Nov 25 11:00:00 crc kubenswrapper[4769]: I1125 11:00:00.313669 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3073507c-bfef-44e2-a58c-68212ea59a75-config-volume\") pod \"collect-profiles-29401140-xql6l\" (UID: 
\"3073507c-bfef-44e2-a58c-68212ea59a75\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-xql6l" Nov 25 11:00:00 crc kubenswrapper[4769]: I1125 11:00:00.314012 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3073507c-bfef-44e2-a58c-68212ea59a75-secret-volume\") pod \"collect-profiles-29401140-xql6l\" (UID: \"3073507c-bfef-44e2-a58c-68212ea59a75\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-xql6l" Nov 25 11:00:00 crc kubenswrapper[4769]: I1125 11:00:00.314077 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7cxw6\" (UniqueName: \"kubernetes.io/projected/3073507c-bfef-44e2-a58c-68212ea59a75-kube-api-access-7cxw6\") pod \"collect-profiles-29401140-xql6l\" (UID: \"3073507c-bfef-44e2-a58c-68212ea59a75\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-xql6l" Nov 25 11:00:00 crc kubenswrapper[4769]: I1125 11:00:00.318725 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3073507c-bfef-44e2-a58c-68212ea59a75-config-volume\") pod \"collect-profiles-29401140-xql6l\" (UID: \"3073507c-bfef-44e2-a58c-68212ea59a75\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-xql6l" Nov 25 11:00:00 crc kubenswrapper[4769]: I1125 11:00:00.332368 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3073507c-bfef-44e2-a58c-68212ea59a75-secret-volume\") pod \"collect-profiles-29401140-xql6l\" (UID: \"3073507c-bfef-44e2-a58c-68212ea59a75\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-xql6l" Nov 25 11:00:00 crc kubenswrapper[4769]: I1125 11:00:00.335589 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7cxw6\" (UniqueName: \"kubernetes.io/projected/3073507c-bfef-44e2-a58c-68212ea59a75-kube-api-access-7cxw6\") pod \"collect-profiles-29401140-xql6l\" (UID: \"3073507c-bfef-44e2-a58c-68212ea59a75\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-xql6l" Nov 25 11:00:00 crc kubenswrapper[4769]: I1125 11:00:00.493708 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-xql6l" Nov 25 11:00:01 crc kubenswrapper[4769]: I1125 11:00:01.026616 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401140-xql6l"] Nov 25 11:00:01 crc kubenswrapper[4769]: I1125 11:00:01.389167 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="eb894707-cfa1-4716-a991-31992d8cff88" containerName="ceilometer-central-agent" probeResult="failure" output=< Nov 25 11:00:01 crc kubenswrapper[4769]: Unkown error: Expecting value: line 1 column 1 (char 0) Nov 25 11:00:01 crc kubenswrapper[4769]: > Nov 25 11:00:01 crc kubenswrapper[4769]: I1125 11:00:01.389233 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/ceilometer-0" Nov 25 11:00:01 crc kubenswrapper[4769]: I1125 11:00:01.390065 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="ceilometer-central-agent" containerStatusID={"Type":"cri-o","ID":"78d1379800f04f8ad4decb9661f3d1d7cd9089312221cd1ef27440e40e9bfff8"} pod="openstack/ceilometer-0" containerMessage="Container ceilometer-central-agent failed liveness probe, will be restarted" Nov 25 11:00:01 crc kubenswrapper[4769]: I1125 11:00:01.390145 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="eb894707-cfa1-4716-a991-31992d8cff88" containerName="ceilometer-central-agent" containerID="cri-o://78d1379800f04f8ad4decb9661f3d1d7cd9089312221cd1ef27440e40e9bfff8" gracePeriod=30 Nov 25 11:00:01 crc kubenswrapper[4769]: I1125 11:00:01.813271 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-xql6l" event={"ID":"3073507c-bfef-44e2-a58c-68212ea59a75","Type":"ContainerStarted","Data":"8ef4809cf66f859102f08172b2cc67a29b029375fd2fdc672b3933e686021194"} Nov 25 11:00:01 crc kubenswrapper[4769]: I1125 11:00:01.813599 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-xql6l" event={"ID":"3073507c-bfef-44e2-a58c-68212ea59a75","Type":"ContainerStarted","Data":"2516b2a675b6e5dadf3a6562c590c2ec4b5f105eb5bc3307df83049b52909ba8"} Nov 25 11:00:01 crc kubenswrapper[4769]: I1125 11:00:01.842576 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-xql6l" podStartSLOduration=1.842547713 podStartE2EDuration="1.842547713s" podCreationTimestamp="2025-11-25 11:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 11:00:01.831669056 +0000 UTC m=+4550.416641379" watchObservedRunningTime="2025-11-25 11:00:01.842547713 +0000 UTC m=+4550.427520046" Nov 25 11:00:02 crc kubenswrapper[4769]: I1125 11:00:02.827286 4769 generic.go:334] "Generic (PLEG): container finished" podID="eb894707-cfa1-4716-a991-31992d8cff88" containerID="78d1379800f04f8ad4decb9661f3d1d7cd9089312221cd1ef27440e40e9bfff8" exitCode=0 Nov 25 11:00:02 crc kubenswrapper[4769]: I1125 11:00:02.827372 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eb894707-cfa1-4716-a991-31992d8cff88","Type":"ContainerDied","Data":"78d1379800f04f8ad4decb9661f3d1d7cd9089312221cd1ef27440e40e9bfff8"} Nov 25 11:00:02 crc kubenswrapper[4769]: I1125 11:00:02.829318 4769 generic.go:334] 
"Generic (PLEG): container finished" podID="3073507c-bfef-44e2-a58c-68212ea59a75" containerID="8ef4809cf66f859102f08172b2cc67a29b029375fd2fdc672b3933e686021194" exitCode=0 Nov 25 11:00:02 crc kubenswrapper[4769]: I1125 11:00:02.829363 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-xql6l" event={"ID":"3073507c-bfef-44e2-a58c-68212ea59a75","Type":"ContainerDied","Data":"8ef4809cf66f859102f08172b2cc67a29b029375fd2fdc672b3933e686021194"} Nov 25 11:00:03 crc kubenswrapper[4769]: I1125 11:00:03.864468 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eb894707-cfa1-4716-a991-31992d8cff88","Type":"ContainerStarted","Data":"d4edd3c5a09de4dcd68abe44a482cb7fe9db0334642d98b82873651929dc762f"} Nov 25 11:00:04 crc kubenswrapper[4769]: I1125 11:00:04.267837 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-xql6l" Nov 25 11:00:04 crc kubenswrapper[4769]: I1125 11:00:04.341502 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7cxw6\" (UniqueName: \"kubernetes.io/projected/3073507c-bfef-44e2-a58c-68212ea59a75-kube-api-access-7cxw6\") pod \"3073507c-bfef-44e2-a58c-68212ea59a75\" (UID: \"3073507c-bfef-44e2-a58c-68212ea59a75\") " Nov 25 11:00:04 crc kubenswrapper[4769]: I1125 11:00:04.341624 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3073507c-bfef-44e2-a58c-68212ea59a75-config-volume\") pod \"3073507c-bfef-44e2-a58c-68212ea59a75\" (UID: \"3073507c-bfef-44e2-a58c-68212ea59a75\") " Nov 25 11:00:04 crc kubenswrapper[4769]: I1125 11:00:04.341883 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3073507c-bfef-44e2-a58c-68212ea59a75-secret-volume\") pod \"3073507c-bfef-44e2-a58c-68212ea59a75\" (UID: \"3073507c-bfef-44e2-a58c-68212ea59a75\") " Nov 25 11:00:04 crc kubenswrapper[4769]: I1125 11:00:04.342493 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3073507c-bfef-44e2-a58c-68212ea59a75-config-volume" (OuterVolumeSpecName: "config-volume") pod "3073507c-bfef-44e2-a58c-68212ea59a75" (UID: "3073507c-bfef-44e2-a58c-68212ea59a75"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 11:00:04 crc kubenswrapper[4769]: I1125 11:00:04.342641 4769 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3073507c-bfef-44e2-a58c-68212ea59a75-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 11:00:04 crc kubenswrapper[4769]: I1125 11:00:04.348234 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3073507c-bfef-44e2-a58c-68212ea59a75-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "3073507c-bfef-44e2-a58c-68212ea59a75" (UID: "3073507c-bfef-44e2-a58c-68212ea59a75"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:00:04 crc kubenswrapper[4769]: I1125 11:00:04.348927 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3073507c-bfef-44e2-a58c-68212ea59a75-kube-api-access-7cxw6" (OuterVolumeSpecName: "kube-api-access-7cxw6") pod "3073507c-bfef-44e2-a58c-68212ea59a75" (UID: "3073507c-bfef-44e2-a58c-68212ea59a75"). InnerVolumeSpecName "kube-api-access-7cxw6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:00:04 crc kubenswrapper[4769]: I1125 11:00:04.445685 4769 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3073507c-bfef-44e2-a58c-68212ea59a75-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 11:00:04 crc kubenswrapper[4769]: I1125 11:00:04.445725 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7cxw6\" (UniqueName: \"kubernetes.io/projected/3073507c-bfef-44e2-a58c-68212ea59a75-kube-api-access-7cxw6\") on node \"crc\" DevicePath \"\"" Nov 25 11:00:04 crc kubenswrapper[4769]: I1125 11:00:04.883100 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-xql6l" event={"ID":"3073507c-bfef-44e2-a58c-68212ea59a75","Type":"ContainerDied","Data":"2516b2a675b6e5dadf3a6562c590c2ec4b5f105eb5bc3307df83049b52909ba8"} Nov 25 11:00:04 crc kubenswrapper[4769]: I1125 11:00:04.883490 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2516b2a675b6e5dadf3a6562c590c2ec4b5f105eb5bc3307df83049b52909ba8" Nov 25 11:00:04 crc kubenswrapper[4769]: I1125 11:00:04.883142 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-xql6l" Nov 25 11:00:04 crc kubenswrapper[4769]: I1125 11:00:04.925497 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401095-9phns"] Nov 25 11:00:04 crc kubenswrapper[4769]: I1125 11:00:04.937521 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401095-9phns"] Nov 25 11:00:05 crc kubenswrapper[4769]: I1125 11:00:05.237296 4769 scope.go:117] "RemoveContainer" containerID="319debd1d2820bc9978952132ea229b634e7709f812ece9fe2a1e10c727e5d05" Nov 25 11:00:05 crc kubenswrapper[4769]: E1125 11:00:05.237876 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 11:00:06 crc kubenswrapper[4769]: I1125 11:00:06.267704 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bef55661-600d-4213-8b7a-d1ac8fec5b6b" path="/var/lib/kubelet/pods/bef55661-600d-4213-8b7a-d1ac8fec5b6b/volumes" Nov 25 11:00:18 crc kubenswrapper[4769]: I1125 11:00:18.247952 4769 scope.go:117] "RemoveContainer" containerID="319debd1d2820bc9978952132ea229b634e7709f812ece9fe2a1e10c727e5d05" Nov 25 11:00:18 crc kubenswrapper[4769]: E1125 11:00:18.249078 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s 
Nov 25 11:00:32 crc kubenswrapper[4769]: I1125 11:00:32.245574 4769 scope.go:117] "RemoveContainer" containerID="319debd1d2820bc9978952132ea229b634e7709f812ece9fe2a1e10c727e5d05"
Nov 25 11:00:32 crc kubenswrapper[4769]: E1125 11:00:32.246310 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256"
Nov 25 11:00:39 crc kubenswrapper[4769]: I1125 11:00:39.454908 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-75kx2"]
Nov 25 11:00:39 crc kubenswrapper[4769]: E1125 11:00:39.456441 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3073507c-bfef-44e2-a58c-68212ea59a75" containerName="collect-profiles"
Nov 25 11:00:39 crc kubenswrapper[4769]: I1125 11:00:39.456461 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="3073507c-bfef-44e2-a58c-68212ea59a75" containerName="collect-profiles"
Nov 25 11:00:39 crc kubenswrapper[4769]: I1125 11:00:39.456773 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="3073507c-bfef-44e2-a58c-68212ea59a75" containerName="collect-profiles"
Nov 25 11:00:39 crc kubenswrapper[4769]: I1125 11:00:39.462990 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-75kx2"
Nov 25 11:00:39 crc kubenswrapper[4769]: I1125 11:00:39.478074 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-75kx2"]
Nov 25 11:00:39 crc kubenswrapper[4769]: I1125 11:00:39.597352 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-chk2l\" (UniqueName: \"kubernetes.io/projected/6cd77c76-d095-45f7-bbdc-608e5e06a0b6-kube-api-access-chk2l\") pod \"certified-operators-75kx2\" (UID: \"6cd77c76-d095-45f7-bbdc-608e5e06a0b6\") " pod="openshift-marketplace/certified-operators-75kx2"
Nov 25 11:00:39 crc kubenswrapper[4769]: I1125 11:00:39.597418 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6cd77c76-d095-45f7-bbdc-608e5e06a0b6-catalog-content\") pod \"certified-operators-75kx2\" (UID: \"6cd77c76-d095-45f7-bbdc-608e5e06a0b6\") " pod="openshift-marketplace/certified-operators-75kx2"
Nov 25 11:00:39 crc kubenswrapper[4769]: I1125 11:00:39.597683 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6cd77c76-d095-45f7-bbdc-608e5e06a0b6-utilities\") pod \"certified-operators-75kx2\" (UID: \"6cd77c76-d095-45f7-bbdc-608e5e06a0b6\") " pod="openshift-marketplace/certified-operators-75kx2"
Nov 25 11:00:39 crc kubenswrapper[4769]: I1125 11:00:39.700498 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6cd77c76-d095-45f7-bbdc-608e5e06a0b6-utilities\") pod \"certified-operators-75kx2\" (UID: \"6cd77c76-d095-45f7-bbdc-608e5e06a0b6\") " pod="openshift-marketplace/certified-operators-75kx2"
Nov 25 11:00:39 crc kubenswrapper[4769]: I1125 11:00:39.700612 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-chk2l\" (UniqueName: \"kubernetes.io/projected/6cd77c76-d095-45f7-bbdc-608e5e06a0b6-kube-api-access-chk2l\") pod \"certified-operators-75kx2\" (UID: \"6cd77c76-d095-45f7-bbdc-608e5e06a0b6\") " pod="openshift-marketplace/certified-operators-75kx2"
Nov 25 11:00:39 crc kubenswrapper[4769]: I1125 11:00:39.700646 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6cd77c76-d095-45f7-bbdc-608e5e06a0b6-catalog-content\") pod \"certified-operators-75kx2\" (UID: \"6cd77c76-d095-45f7-bbdc-608e5e06a0b6\") " pod="openshift-marketplace/certified-operators-75kx2"
Nov 25 11:00:39 crc kubenswrapper[4769]: I1125 11:00:39.701181 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6cd77c76-d095-45f7-bbdc-608e5e06a0b6-utilities\") pod \"certified-operators-75kx2\" (UID: \"6cd77c76-d095-45f7-bbdc-608e5e06a0b6\") " pod="openshift-marketplace/certified-operators-75kx2"
Nov 25 11:00:39 crc kubenswrapper[4769]: I1125 11:00:39.701290 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6cd77c76-d095-45f7-bbdc-608e5e06a0b6-catalog-content\") pod \"certified-operators-75kx2\" (UID: \"6cd77c76-d095-45f7-bbdc-608e5e06a0b6\") " pod="openshift-marketplace/certified-operators-75kx2"
Nov 25 11:00:39 crc kubenswrapper[4769]: I1125 11:00:39.724505 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-chk2l\" (UniqueName: \"kubernetes.io/projected/6cd77c76-d095-45f7-bbdc-608e5e06a0b6-kube-api-access-chk2l\") pod \"certified-operators-75kx2\" (UID: \"6cd77c76-d095-45f7-bbdc-608e5e06a0b6\") " pod="openshift-marketplace/certified-operators-75kx2"
Nov 25 11:00:39 crc kubenswrapper[4769]: I1125 11:00:39.805539 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-75kx2"
Nov 25 11:00:40 crc kubenswrapper[4769]: I1125 11:00:40.436675 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-75kx2"]
Nov 25 11:00:41 crc kubenswrapper[4769]: I1125 11:00:41.337819 4769 generic.go:334] "Generic (PLEG): container finished" podID="6cd77c76-d095-45f7-bbdc-608e5e06a0b6" containerID="21395afcf2a7440441e548a7ad93c8be28e06786dfc6597de881ed00887c307c" exitCode=0
Nov 25 11:00:41 crc kubenswrapper[4769]: I1125 11:00:41.337918 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-75kx2" event={"ID":"6cd77c76-d095-45f7-bbdc-608e5e06a0b6","Type":"ContainerDied","Data":"21395afcf2a7440441e548a7ad93c8be28e06786dfc6597de881ed00887c307c"}
Nov 25 11:00:41 crc kubenswrapper[4769]: I1125 11:00:41.337997 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-75kx2" event={"ID":"6cd77c76-d095-45f7-bbdc-608e5e06a0b6","Type":"ContainerStarted","Data":"9c826942f11c7984de51eac396fa20427341473667c4e83796d52e34b6c756c9"}
Nov 25 11:00:42 crc kubenswrapper[4769]: I1125 11:00:42.352337 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-75kx2" event={"ID":"6cd77c76-d095-45f7-bbdc-608e5e06a0b6","Type":"ContainerStarted","Data":"4b772daffd21bfea3692c7cdfd9435105f1b9ccc469f126441cfa9f888d2e114"}
Nov 25 11:00:44 crc kubenswrapper[4769]: I1125 11:00:44.238630 4769 scope.go:117] "RemoveContainer" containerID="319debd1d2820bc9978952132ea229b634e7709f812ece9fe2a1e10c727e5d05"
Nov 25 11:00:44 crc kubenswrapper[4769]: E1125 11:00:44.240540 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256"
Nov 25 11:00:46 crc kubenswrapper[4769]: I1125 11:00:46.412879 4769 generic.go:334] "Generic (PLEG): container finished" podID="6cd77c76-d095-45f7-bbdc-608e5e06a0b6" containerID="4b772daffd21bfea3692c7cdfd9435105f1b9ccc469f126441cfa9f888d2e114" exitCode=0
Nov 25 11:00:46 crc kubenswrapper[4769]: I1125 11:00:46.413074 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-75kx2" event={"ID":"6cd77c76-d095-45f7-bbdc-608e5e06a0b6","Type":"ContainerDied","Data":"4b772daffd21bfea3692c7cdfd9435105f1b9ccc469f126441cfa9f888d2e114"}
Nov 25 11:00:47 crc kubenswrapper[4769]: I1125 11:00:47.787078 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-galera-0" podUID="52438bf8-8800-4078-bc88-63033a83dd2e" containerName="galera" probeResult="failure" output="command timed out"
Nov 25 11:00:49 crc kubenswrapper[4769]: I1125 11:00:49.236251 4769 scope.go:117] "RemoveContainer" containerID="cba2d6c2679932ed0a4e4cc0bc3ec8db000eb1206ecba9d90ac273d5742b29c6"
containerID="cba2d6c2679932ed0a4e4cc0bc3ec8db000eb1206ecba9d90ac273d5742b29c6" Nov 25 11:00:50 crc kubenswrapper[4769]: I1125 11:00:50.472313 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-75kx2" event={"ID":"6cd77c76-d095-45f7-bbdc-608e5e06a0b6","Type":"ContainerStarted","Data":"c41dc29f4f6f79af70c0b3533f128f89befad1128e96953190d6968eb9e10e64"} Nov 25 11:00:57 crc kubenswrapper[4769]: I1125 11:00:57.237942 4769 scope.go:117] "RemoveContainer" containerID="319debd1d2820bc9978952132ea229b634e7709f812ece9fe2a1e10c727e5d05" Nov 25 11:00:57 crc kubenswrapper[4769]: E1125 11:00:57.239209 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 11:00:59 crc kubenswrapper[4769]: I1125 11:00:59.806820 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-75kx2" Nov 25 11:00:59 crc kubenswrapper[4769]: I1125 11:00:59.807383 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-75kx2" Nov 25 11:01:00 crc kubenswrapper[4769]: I1125 11:01:00.147344 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-75kx2" podStartSLOduration=13.140193818 podStartE2EDuration="21.1473228s" podCreationTimestamp="2025-11-25 11:00:39 +0000 UTC" firstStartedPulling="2025-11-25 11:00:41.341058275 +0000 UTC m=+4589.926030628" lastFinishedPulling="2025-11-25 11:00:49.348187297 +0000 UTC m=+4597.933159610" observedRunningTime="2025-11-25 11:00:50.508468167 +0000 UTC m=+4599.093440490" watchObservedRunningTime="2025-11-25 11:01:00.1473228 +0000 UTC m=+4608.732295123" Nov 25 11:01:00 crc kubenswrapper[4769]: I1125 11:01:00.148649 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29401141-t6zhj"] Nov 25 11:01:00 crc kubenswrapper[4769]: I1125 11:01:00.150495 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29401141-t6zhj" Nov 25 11:01:00 crc kubenswrapper[4769]: I1125 11:01:00.164247 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29401141-t6zhj"] Nov 25 11:01:00 crc kubenswrapper[4769]: I1125 11:01:00.252349 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-75kx2" Nov 25 11:01:00 crc kubenswrapper[4769]: I1125 11:01:00.253752 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z7dnk\" (UniqueName: \"kubernetes.io/projected/c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9-kube-api-access-z7dnk\") pod \"keystone-cron-29401141-t6zhj\" (UID: \"c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9\") " pod="openstack/keystone-cron-29401141-t6zhj" Nov 25 11:01:00 crc kubenswrapper[4769]: I1125 11:01:00.253844 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9-combined-ca-bundle\") pod \"keystone-cron-29401141-t6zhj\" (UID: \"c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9\") " pod="openstack/keystone-cron-29401141-t6zhj" Nov 25 11:01:00 crc kubenswrapper[4769]: I1125 11:01:00.253896 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9-fernet-keys\") pod \"keystone-cron-29401141-t6zhj\" (UID: \"c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9\") " pod="openstack/keystone-cron-29401141-t6zhj" Nov 25 11:01:00 crc kubenswrapper[4769]: I1125 11:01:00.254025 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9-config-data\") pod \"keystone-cron-29401141-t6zhj\" (UID: \"c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9\") " pod="openstack/keystone-cron-29401141-t6zhj" Nov 25 11:01:00 crc kubenswrapper[4769]: I1125 11:01:00.356113 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z7dnk\" (UniqueName: \"kubernetes.io/projected/c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9-kube-api-access-z7dnk\") pod \"keystone-cron-29401141-t6zhj\" (UID: \"c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9\") " pod="openstack/keystone-cron-29401141-t6zhj" Nov 25 11:01:00 crc kubenswrapper[4769]: I1125 11:01:00.356580 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9-combined-ca-bundle\") pod \"keystone-cron-29401141-t6zhj\" (UID: \"c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9\") " pod="openstack/keystone-cron-29401141-t6zhj" Nov 25 11:01:00 crc kubenswrapper[4769]: I1125 11:01:00.356717 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9-fernet-keys\") pod \"keystone-cron-29401141-t6zhj\" (UID: \"c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9\") " pod="openstack/keystone-cron-29401141-t6zhj" Nov 25 11:01:00 crc kubenswrapper[4769]: I1125 11:01:00.356912 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9-config-data\") pod \"keystone-cron-29401141-t6zhj\" (UID: 
\"c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9\") " pod="openstack/keystone-cron-29401141-t6zhj" Nov 25 11:01:00 crc kubenswrapper[4769]: I1125 11:01:00.365443 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9-config-data\") pod \"keystone-cron-29401141-t6zhj\" (UID: \"c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9\") " pod="openstack/keystone-cron-29401141-t6zhj" Nov 25 11:01:00 crc kubenswrapper[4769]: I1125 11:01:00.366175 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9-combined-ca-bundle\") pod \"keystone-cron-29401141-t6zhj\" (UID: \"c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9\") " pod="openstack/keystone-cron-29401141-t6zhj" Nov 25 11:01:00 crc kubenswrapper[4769]: I1125 11:01:00.366312 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9-fernet-keys\") pod \"keystone-cron-29401141-t6zhj\" (UID: \"c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9\") " pod="openstack/keystone-cron-29401141-t6zhj" Nov 25 11:01:00 crc kubenswrapper[4769]: I1125 11:01:00.386765 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z7dnk\" (UniqueName: \"kubernetes.io/projected/c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9-kube-api-access-z7dnk\") pod \"keystone-cron-29401141-t6zhj\" (UID: \"c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9\") " pod="openstack/keystone-cron-29401141-t6zhj" Nov 25 11:01:00 crc kubenswrapper[4769]: I1125 11:01:00.509142 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29401141-t6zhj" Nov 25 11:01:00 crc kubenswrapper[4769]: I1125 11:01:00.666209 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-75kx2" Nov 25 11:01:00 crc kubenswrapper[4769]: I1125 11:01:00.742664 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-75kx2"] Nov 25 11:01:00 crc kubenswrapper[4769]: I1125 11:01:00.997353 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29401141-t6zhj"] Nov 25 11:01:01 crc kubenswrapper[4769]: I1125 11:01:01.621131 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401141-t6zhj" event={"ID":"c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9","Type":"ContainerStarted","Data":"5e7e701962a02026017dd9bad5bdebb47a1759923aec86dc4feb487805067272"} Nov 25 11:01:01 crc kubenswrapper[4769]: I1125 11:01:01.621502 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401141-t6zhj" event={"ID":"c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9","Type":"ContainerStarted","Data":"1b6b5c8f5c49b0650e236ee4517a1aa9f96d00f66018dffe5d516b05151cb0da"} Nov 25 11:01:01 crc kubenswrapper[4769]: I1125 11:01:01.638817 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29401141-t6zhj" podStartSLOduration=1.6388020810000001 podStartE2EDuration="1.638802081s" podCreationTimestamp="2025-11-25 11:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 11:01:01.635433462 +0000 UTC m=+4610.220405775" watchObservedRunningTime="2025-11-25 11:01:01.638802081 +0000 UTC m=+4610.223774394" Nov 25 11:01:02 crc 
kubenswrapper[4769]: I1125 11:01:02.634621 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-75kx2" podUID="6cd77c76-d095-45f7-bbdc-608e5e06a0b6" containerName="registry-server" containerID="cri-o://c41dc29f4f6f79af70c0b3533f128f89befad1128e96953190d6968eb9e10e64" gracePeriod=2 Nov 25 11:01:03 crc kubenswrapper[4769]: I1125 11:01:03.228600 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-75kx2" Nov 25 11:01:03 crc kubenswrapper[4769]: I1125 11:01:03.332731 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6cd77c76-d095-45f7-bbdc-608e5e06a0b6-utilities\") pod \"6cd77c76-d095-45f7-bbdc-608e5e06a0b6\" (UID: \"6cd77c76-d095-45f7-bbdc-608e5e06a0b6\") " Nov 25 11:01:03 crc kubenswrapper[4769]: I1125 11:01:03.332809 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-chk2l\" (UniqueName: \"kubernetes.io/projected/6cd77c76-d095-45f7-bbdc-608e5e06a0b6-kube-api-access-chk2l\") pod \"6cd77c76-d095-45f7-bbdc-608e5e06a0b6\" (UID: \"6cd77c76-d095-45f7-bbdc-608e5e06a0b6\") " Nov 25 11:01:03 crc kubenswrapper[4769]: I1125 11:01:03.333064 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6cd77c76-d095-45f7-bbdc-608e5e06a0b6-catalog-content\") pod \"6cd77c76-d095-45f7-bbdc-608e5e06a0b6\" (UID: \"6cd77c76-d095-45f7-bbdc-608e5e06a0b6\") " Nov 25 11:01:03 crc kubenswrapper[4769]: I1125 11:01:03.333880 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6cd77c76-d095-45f7-bbdc-608e5e06a0b6-utilities" (OuterVolumeSpecName: "utilities") pod "6cd77c76-d095-45f7-bbdc-608e5e06a0b6" (UID: "6cd77c76-d095-45f7-bbdc-608e5e06a0b6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:01:03 crc kubenswrapper[4769]: I1125 11:01:03.343094 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6cd77c76-d095-45f7-bbdc-608e5e06a0b6-kube-api-access-chk2l" (OuterVolumeSpecName: "kube-api-access-chk2l") pod "6cd77c76-d095-45f7-bbdc-608e5e06a0b6" (UID: "6cd77c76-d095-45f7-bbdc-608e5e06a0b6"). InnerVolumeSpecName "kube-api-access-chk2l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:01:03 crc kubenswrapper[4769]: I1125 11:01:03.438754 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6cd77c76-d095-45f7-bbdc-608e5e06a0b6-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 11:01:03 crc kubenswrapper[4769]: I1125 11:01:03.439523 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6cd77c76-d095-45f7-bbdc-608e5e06a0b6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6cd77c76-d095-45f7-bbdc-608e5e06a0b6" (UID: "6cd77c76-d095-45f7-bbdc-608e5e06a0b6"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:01:03 crc kubenswrapper[4769]: I1125 11:01:03.439551 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-chk2l\" (UniqueName: \"kubernetes.io/projected/6cd77c76-d095-45f7-bbdc-608e5e06a0b6-kube-api-access-chk2l\") on node \"crc\" DevicePath \"\"" Nov 25 11:01:03 crc kubenswrapper[4769]: I1125 11:01:03.541173 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6cd77c76-d095-45f7-bbdc-608e5e06a0b6-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 11:01:03 crc kubenswrapper[4769]: I1125 11:01:03.649591 4769 generic.go:334] "Generic (PLEG): container finished" podID="6cd77c76-d095-45f7-bbdc-608e5e06a0b6" containerID="c41dc29f4f6f79af70c0b3533f128f89befad1128e96953190d6968eb9e10e64" exitCode=0 Nov 25 11:01:03 crc kubenswrapper[4769]: I1125 11:01:03.649631 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-75kx2" event={"ID":"6cd77c76-d095-45f7-bbdc-608e5e06a0b6","Type":"ContainerDied","Data":"c41dc29f4f6f79af70c0b3533f128f89befad1128e96953190d6968eb9e10e64"} Nov 25 11:01:03 crc kubenswrapper[4769]: I1125 11:01:03.649639 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-75kx2" Nov 25 11:01:03 crc kubenswrapper[4769]: I1125 11:01:03.649657 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-75kx2" event={"ID":"6cd77c76-d095-45f7-bbdc-608e5e06a0b6","Type":"ContainerDied","Data":"9c826942f11c7984de51eac396fa20427341473667c4e83796d52e34b6c756c9"} Nov 25 11:01:03 crc kubenswrapper[4769]: I1125 11:01:03.649718 4769 scope.go:117] "RemoveContainer" containerID="c41dc29f4f6f79af70c0b3533f128f89befad1128e96953190d6968eb9e10e64" Nov 25 11:01:03 crc kubenswrapper[4769]: I1125 11:01:03.681453 4769 scope.go:117] "RemoveContainer" containerID="4b772daffd21bfea3692c7cdfd9435105f1b9ccc469f126441cfa9f888d2e114" Nov 25 11:01:03 crc kubenswrapper[4769]: I1125 11:01:03.699861 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-75kx2"] Nov 25 11:01:03 crc kubenswrapper[4769]: I1125 11:01:03.718713 4769 scope.go:117] "RemoveContainer" containerID="21395afcf2a7440441e548a7ad93c8be28e06786dfc6597de881ed00887c307c" Nov 25 11:01:03 crc kubenswrapper[4769]: I1125 11:01:03.719083 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-75kx2"] Nov 25 11:01:03 crc kubenswrapper[4769]: I1125 11:01:03.765584 4769 scope.go:117] "RemoveContainer" containerID="c41dc29f4f6f79af70c0b3533f128f89befad1128e96953190d6968eb9e10e64" Nov 25 11:01:03 crc kubenswrapper[4769]: E1125 11:01:03.766039 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c41dc29f4f6f79af70c0b3533f128f89befad1128e96953190d6968eb9e10e64\": container with ID starting with c41dc29f4f6f79af70c0b3533f128f89befad1128e96953190d6968eb9e10e64 not found: ID does not exist" containerID="c41dc29f4f6f79af70c0b3533f128f89befad1128e96953190d6968eb9e10e64" Nov 25 11:01:03 crc kubenswrapper[4769]: I1125 11:01:03.766086 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c41dc29f4f6f79af70c0b3533f128f89befad1128e96953190d6968eb9e10e64"} err="failed to get container status 
\"c41dc29f4f6f79af70c0b3533f128f89befad1128e96953190d6968eb9e10e64\": rpc error: code = NotFound desc = could not find container \"c41dc29f4f6f79af70c0b3533f128f89befad1128e96953190d6968eb9e10e64\": container with ID starting with c41dc29f4f6f79af70c0b3533f128f89befad1128e96953190d6968eb9e10e64 not found: ID does not exist" Nov 25 11:01:03 crc kubenswrapper[4769]: I1125 11:01:03.766117 4769 scope.go:117] "RemoveContainer" containerID="4b772daffd21bfea3692c7cdfd9435105f1b9ccc469f126441cfa9f888d2e114" Nov 25 11:01:03 crc kubenswrapper[4769]: E1125 11:01:03.766564 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4b772daffd21bfea3692c7cdfd9435105f1b9ccc469f126441cfa9f888d2e114\": container with ID starting with 4b772daffd21bfea3692c7cdfd9435105f1b9ccc469f126441cfa9f888d2e114 not found: ID does not exist" containerID="4b772daffd21bfea3692c7cdfd9435105f1b9ccc469f126441cfa9f888d2e114" Nov 25 11:01:03 crc kubenswrapper[4769]: I1125 11:01:03.766628 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4b772daffd21bfea3692c7cdfd9435105f1b9ccc469f126441cfa9f888d2e114"} err="failed to get container status \"4b772daffd21bfea3692c7cdfd9435105f1b9ccc469f126441cfa9f888d2e114\": rpc error: code = NotFound desc = could not find container \"4b772daffd21bfea3692c7cdfd9435105f1b9ccc469f126441cfa9f888d2e114\": container with ID starting with 4b772daffd21bfea3692c7cdfd9435105f1b9ccc469f126441cfa9f888d2e114 not found: ID does not exist" Nov 25 11:01:03 crc kubenswrapper[4769]: I1125 11:01:03.766657 4769 scope.go:117] "RemoveContainer" containerID="21395afcf2a7440441e548a7ad93c8be28e06786dfc6597de881ed00887c307c" Nov 25 11:01:03 crc kubenswrapper[4769]: E1125 11:01:03.767084 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"21395afcf2a7440441e548a7ad93c8be28e06786dfc6597de881ed00887c307c\": container with ID starting with 21395afcf2a7440441e548a7ad93c8be28e06786dfc6597de881ed00887c307c not found: ID does not exist" containerID="21395afcf2a7440441e548a7ad93c8be28e06786dfc6597de881ed00887c307c" Nov 25 11:01:03 crc kubenswrapper[4769]: I1125 11:01:03.767148 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"21395afcf2a7440441e548a7ad93c8be28e06786dfc6597de881ed00887c307c"} err="failed to get container status \"21395afcf2a7440441e548a7ad93c8be28e06786dfc6597de881ed00887c307c\": rpc error: code = NotFound desc = could not find container \"21395afcf2a7440441e548a7ad93c8be28e06786dfc6597de881ed00887c307c\": container with ID starting with 21395afcf2a7440441e548a7ad93c8be28e06786dfc6597de881ed00887c307c not found: ID does not exist" Nov 25 11:01:04 crc kubenswrapper[4769]: I1125 11:01:04.283102 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6cd77c76-d095-45f7-bbdc-608e5e06a0b6" path="/var/lib/kubelet/pods/6cd77c76-d095-45f7-bbdc-608e5e06a0b6/volumes" Nov 25 11:01:05 crc kubenswrapper[4769]: I1125 11:01:05.676227 4769 generic.go:334] "Generic (PLEG): container finished" podID="c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9" containerID="5e7e701962a02026017dd9bad5bdebb47a1759923aec86dc4feb487805067272" exitCode=0 Nov 25 11:01:05 crc kubenswrapper[4769]: I1125 11:01:05.676316 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401141-t6zhj" 
event={"ID":"c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9","Type":"ContainerDied","Data":"5e7e701962a02026017dd9bad5bdebb47a1759923aec86dc4feb487805067272"} Nov 25 11:01:07 crc kubenswrapper[4769]: I1125 11:01:07.203526 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29401141-t6zhj" Nov 25 11:01:07 crc kubenswrapper[4769]: I1125 11:01:07.363575 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9-combined-ca-bundle\") pod \"c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9\" (UID: \"c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9\") " Nov 25 11:01:07 crc kubenswrapper[4769]: I1125 11:01:07.363731 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9-config-data\") pod \"c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9\" (UID: \"c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9\") " Nov 25 11:01:07 crc kubenswrapper[4769]: I1125 11:01:07.363977 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z7dnk\" (UniqueName: \"kubernetes.io/projected/c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9-kube-api-access-z7dnk\") pod \"c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9\" (UID: \"c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9\") " Nov 25 11:01:07 crc kubenswrapper[4769]: I1125 11:01:07.364035 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9-fernet-keys\") pod \"c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9\" (UID: \"c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9\") " Nov 25 11:01:07 crc kubenswrapper[4769]: I1125 11:01:07.377113 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9" (UID: "c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:01:07 crc kubenswrapper[4769]: I1125 11:01:07.379441 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9-kube-api-access-z7dnk" (OuterVolumeSpecName: "kube-api-access-z7dnk") pod "c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9" (UID: "c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9"). InnerVolumeSpecName "kube-api-access-z7dnk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:01:07 crc kubenswrapper[4769]: I1125 11:01:07.411362 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9" (UID: "c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:01:07 crc kubenswrapper[4769]: I1125 11:01:07.446056 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9-config-data" (OuterVolumeSpecName: "config-data") pod "c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9" (UID: "c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:01:07 crc kubenswrapper[4769]: I1125 11:01:07.467263 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 11:01:07 crc kubenswrapper[4769]: I1125 11:01:07.467494 4769 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 11:01:07 crc kubenswrapper[4769]: I1125 11:01:07.467561 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z7dnk\" (UniqueName: \"kubernetes.io/projected/c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9-kube-api-access-z7dnk\") on node \"crc\" DevicePath \"\"" Nov 25 11:01:07 crc kubenswrapper[4769]: I1125 11:01:07.467622 4769 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 11:01:07 crc kubenswrapper[4769]: I1125 11:01:07.705652 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401141-t6zhj" event={"ID":"c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9","Type":"ContainerDied","Data":"1b6b5c8f5c49b0650e236ee4517a1aa9f96d00f66018dffe5d516b05151cb0da"} Nov 25 11:01:07 crc kubenswrapper[4769]: I1125 11:01:07.705901 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1b6b5c8f5c49b0650e236ee4517a1aa9f96d00f66018dffe5d516b05151cb0da" Nov 25 11:01:07 crc kubenswrapper[4769]: I1125 11:01:07.705922 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29401141-t6zhj" Nov 25 11:01:10 crc kubenswrapper[4769]: I1125 11:01:10.237078 4769 scope.go:117] "RemoveContainer" containerID="319debd1d2820bc9978952132ea229b634e7709f812ece9fe2a1e10c727e5d05" Nov 25 11:01:10 crc kubenswrapper[4769]: E1125 11:01:10.238269 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 11:01:24 crc kubenswrapper[4769]: I1125 11:01:24.238517 4769 scope.go:117] "RemoveContainer" containerID="319debd1d2820bc9978952132ea229b634e7709f812ece9fe2a1e10c727e5d05" Nov 25 11:01:24 crc kubenswrapper[4769]: E1125 11:01:24.240951 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 11:01:35 crc kubenswrapper[4769]: I1125 11:01:35.238885 4769 scope.go:117] "RemoveContainer" containerID="319debd1d2820bc9978952132ea229b634e7709f812ece9fe2a1e10c727e5d05" Nov 25 11:01:35 crc kubenswrapper[4769]: E1125 11:01:35.239948 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" 
with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 11:01:50 crc kubenswrapper[4769]: I1125 11:01:50.237277 4769 scope.go:117] "RemoveContainer" containerID="319debd1d2820bc9978952132ea229b634e7709f812ece9fe2a1e10c727e5d05" Nov 25 11:01:50 crc kubenswrapper[4769]: E1125 11:01:50.238528 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 11:02:03 crc kubenswrapper[4769]: I1125 11:02:03.237311 4769 scope.go:117] "RemoveContainer" containerID="319debd1d2820bc9978952132ea229b634e7709f812ece9fe2a1e10c727e5d05" Nov 25 11:02:03 crc kubenswrapper[4769]: E1125 11:02:03.239762 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 11:02:17 crc kubenswrapper[4769]: I1125 11:02:17.238409 4769 scope.go:117] "RemoveContainer" containerID="319debd1d2820bc9978952132ea229b634e7709f812ece9fe2a1e10c727e5d05" Nov 25 11:02:17 crc kubenswrapper[4769]: E1125 11:02:17.239889 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 11:02:30 crc kubenswrapper[4769]: I1125 11:02:30.239044 4769 scope.go:117] "RemoveContainer" containerID="319debd1d2820bc9978952132ea229b634e7709f812ece9fe2a1e10c727e5d05" Nov 25 11:02:30 crc kubenswrapper[4769]: I1125 11:02:30.889583 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" event={"ID":"d58c71b5-5dc4-45c1-9b58-9740a35d2256","Type":"ContainerStarted","Data":"a9023e5a831ec6d39937fcd0f7843ba8dde118a0ee018068576d8f5bb54ffae1"} Nov 25 11:03:49 crc kubenswrapper[4769]: E1125 11:03:49.452330 4769 kubelet.go:2526] "Housekeeping took longer than expected" err="housekeeping took too long" expected="1s" actual="1.216s" Nov 25 11:04:52 crc kubenswrapper[4769]: I1125 11:04:52.290078 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 11:04:52 crc kubenswrapper[4769]: I1125 11:04:52.290688 4769 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 11:04:54 crc kubenswrapper[4769]: I1125 11:04:54.317058 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest"] Nov 25 11:04:54 crc kubenswrapper[4769]: E1125 11:04:54.318091 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9" containerName="keystone-cron" Nov 25 11:04:54 crc kubenswrapper[4769]: I1125 11:04:54.318105 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9" containerName="keystone-cron" Nov 25 11:04:54 crc kubenswrapper[4769]: E1125 11:04:54.318124 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6cd77c76-d095-45f7-bbdc-608e5e06a0b6" containerName="extract-content" Nov 25 11:04:54 crc kubenswrapper[4769]: I1125 11:04:54.318132 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="6cd77c76-d095-45f7-bbdc-608e5e06a0b6" containerName="extract-content" Nov 25 11:04:54 crc kubenswrapper[4769]: E1125 11:04:54.318145 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6cd77c76-d095-45f7-bbdc-608e5e06a0b6" containerName="extract-utilities" Nov 25 11:04:54 crc kubenswrapper[4769]: I1125 11:04:54.318151 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="6cd77c76-d095-45f7-bbdc-608e5e06a0b6" containerName="extract-utilities" Nov 25 11:04:54 crc kubenswrapper[4769]: E1125 11:04:54.318170 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6cd77c76-d095-45f7-bbdc-608e5e06a0b6" containerName="registry-server" Nov 25 11:04:54 crc kubenswrapper[4769]: I1125 11:04:54.318175 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="6cd77c76-d095-45f7-bbdc-608e5e06a0b6" containerName="registry-server" Nov 25 11:04:54 crc kubenswrapper[4769]: I1125 11:04:54.318413 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9" containerName="keystone-cron" Nov 25 11:04:54 crc kubenswrapper[4769]: I1125 11:04:54.318451 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="6cd77c76-d095-45f7-bbdc-608e5e06a0b6" containerName="registry-server" Nov 25 11:04:54 crc kubenswrapper[4769]: I1125 11:04:54.319298 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 25 11:04:54 crc kubenswrapper[4769]: I1125 11:04:54.321981 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-vc5tq" Nov 25 11:04:54 crc kubenswrapper[4769]: I1125 11:04:54.322114 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"test-operator-controller-priv-key" Nov 25 11:04:54 crc kubenswrapper[4769]: I1125 11:04:54.322529 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s0" Nov 25 11:04:54 crc kubenswrapper[4769]: I1125 11:04:54.322743 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Nov 25 11:04:54 crc kubenswrapper[4769]: I1125 11:04:54.327136 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Nov 25 11:04:54 crc kubenswrapper[4769]: I1125 11:04:54.431280 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\") " pod="openstack/tempest-tests-tempest" Nov 25 11:04:54 crc kubenswrapper[4769]: I1125 11:04:54.431418 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\") " pod="openstack/tempest-tests-tempest" Nov 25 11:04:54 crc kubenswrapper[4769]: I1125 11:04:54.431640 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\") " pod="openstack/tempest-tests-tempest" Nov 25 11:04:54 crc kubenswrapper[4769]: I1125 11:04:54.431926 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\") " pod="openstack/tempest-tests-tempest" Nov 25 11:04:54 crc kubenswrapper[4769]: I1125 11:04:54.432106 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"tempest-tests-tempest\" (UID: \"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\") " pod="openstack/tempest-tests-tempest" Nov 25 11:04:54 crc kubenswrapper[4769]: I1125 11:04:54.432323 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\") " pod="openstack/tempest-tests-tempest" Nov 25 11:04:54 crc kubenswrapper[4769]: I1125 11:04:54.432422 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-config-data\") pod \"tempest-tests-tempest\" (UID: \"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\") " pod="openstack/tempest-tests-tempest" Nov 25 11:04:54 crc kubenswrapper[4769]: I1125 11:04:54.432509 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gs7sr\" (UniqueName: \"kubernetes.io/projected/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-kube-api-access-gs7sr\") pod \"tempest-tests-tempest\" (UID: \"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\") " pod="openstack/tempest-tests-tempest" Nov 25 11:04:54 crc kubenswrapper[4769]: I1125 11:04:54.432553 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\") " pod="openstack/tempest-tests-tempest" Nov 25 11:04:54 crc kubenswrapper[4769]: I1125 11:04:54.534509 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\") " pod="openstack/tempest-tests-tempest" Nov 25 11:04:54 crc kubenswrapper[4769]: I1125 11:04:54.534575 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"tempest-tests-tempest\" (UID: \"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\") " pod="openstack/tempest-tests-tempest" Nov 25 11:04:54 crc kubenswrapper[4769]: I1125 11:04:54.534646 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\") " pod="openstack/tempest-tests-tempest" Nov 25 11:04:54 crc kubenswrapper[4769]: I1125 11:04:54.534685 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-config-data\") pod \"tempest-tests-tempest\" (UID: \"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\") " pod="openstack/tempest-tests-tempest" Nov 25 11:04:54 crc kubenswrapper[4769]: I1125 11:04:54.534754 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gs7sr\" (UniqueName: \"kubernetes.io/projected/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-kube-api-access-gs7sr\") pod \"tempest-tests-tempest\" (UID: \"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\") " pod="openstack/tempest-tests-tempest" Nov 25 11:04:54 crc kubenswrapper[4769]: I1125 11:04:54.534859 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\") " pod="openstack/tempest-tests-tempest" Nov 25 11:04:54 crc kubenswrapper[4769]: I1125 11:04:54.534883 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: 
\"kubernetes.io/secret/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\") " pod="openstack/tempest-tests-tempest" Nov 25 11:04:54 crc kubenswrapper[4769]: I1125 11:04:54.535248 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\") " pod="openstack/tempest-tests-tempest" Nov 25 11:04:54 crc kubenswrapper[4769]: I1125 11:04:54.535598 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\") " pod="openstack/tempest-tests-tempest" Nov 25 11:04:54 crc kubenswrapper[4769]: I1125 11:04:54.536027 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-config-data\") pod \"tempest-tests-tempest\" (UID: \"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\") " pod="openstack/tempest-tests-tempest" Nov 25 11:04:54 crc kubenswrapper[4769]: I1125 11:04:54.536132 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\") " pod="openstack/tempest-tests-tempest" Nov 25 11:04:54 crc kubenswrapper[4769]: I1125 11:04:54.536771 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\") " pod="openstack/tempest-tests-tempest" Nov 25 11:04:54 crc kubenswrapper[4769]: I1125 11:04:54.536833 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\") " pod="openstack/tempest-tests-tempest" Nov 25 11:04:54 crc kubenswrapper[4769]: I1125 11:04:54.540243 4769 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"tempest-tests-tempest\" (UID: \"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/tempest-tests-tempest" Nov 25 11:04:54 crc kubenswrapper[4769]: I1125 11:04:54.540490 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\") " pod="openstack/tempest-tests-tempest" Nov 25 11:04:54 crc kubenswrapper[4769]: I1125 11:04:54.540660 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: 
\"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\") " pod="openstack/tempest-tests-tempest" Nov 25 11:04:54 crc kubenswrapper[4769]: I1125 11:04:54.555623 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\") " pod="openstack/tempest-tests-tempest" Nov 25 11:04:54 crc kubenswrapper[4769]: I1125 11:04:54.560902 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gs7sr\" (UniqueName: \"kubernetes.io/projected/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-kube-api-access-gs7sr\") pod \"tempest-tests-tempest\" (UID: \"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\") " pod="openstack/tempest-tests-tempest" Nov 25 11:04:54 crc kubenswrapper[4769]: I1125 11:04:54.587800 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"tempest-tests-tempest\" (UID: \"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\") " pod="openstack/tempest-tests-tempest" Nov 25 11:04:54 crc kubenswrapper[4769]: I1125 11:04:54.639712 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 25 11:04:55 crc kubenswrapper[4769]: I1125 11:04:55.135221 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Nov 25 11:04:55 crc kubenswrapper[4769]: I1125 11:04:55.137430 4769 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 11:04:55 crc kubenswrapper[4769]: I1125 11:04:55.294505 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"03f7a5db-5b5b-4107-ab58-1fc92bea67a1","Type":"ContainerStarted","Data":"ea1c4715a89b221ec2cb536fad6d08aa9e4d5173b32268c2f595124f70ec8aba"} Nov 25 11:05:22 crc kubenswrapper[4769]: I1125 11:05:22.290283 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 11:05:22 crc kubenswrapper[4769]: I1125 11:05:22.290979 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 11:05:26 crc kubenswrapper[4769]: I1125 11:05:26.454785 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-hlvkh"] Nov 25 11:05:26 crc kubenswrapper[4769]: I1125 11:05:26.484449 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-hlvkh"] Nov 25 11:05:26 crc kubenswrapper[4769]: I1125 11:05:26.485436 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hlvkh" Nov 25 11:05:26 crc kubenswrapper[4769]: I1125 11:05:26.670898 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3166d521-e1d9-476b-9f16-23857ce560c9-catalog-content\") pod \"redhat-marketplace-hlvkh\" (UID: \"3166d521-e1d9-476b-9f16-23857ce560c9\") " pod="openshift-marketplace/redhat-marketplace-hlvkh" Nov 25 11:05:26 crc kubenswrapper[4769]: I1125 11:05:26.671384 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mqddp\" (UniqueName: \"kubernetes.io/projected/3166d521-e1d9-476b-9f16-23857ce560c9-kube-api-access-mqddp\") pod \"redhat-marketplace-hlvkh\" (UID: \"3166d521-e1d9-476b-9f16-23857ce560c9\") " pod="openshift-marketplace/redhat-marketplace-hlvkh" Nov 25 11:05:26 crc kubenswrapper[4769]: I1125 11:05:26.671495 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3166d521-e1d9-476b-9f16-23857ce560c9-utilities\") pod \"redhat-marketplace-hlvkh\" (UID: \"3166d521-e1d9-476b-9f16-23857ce560c9\") " pod="openshift-marketplace/redhat-marketplace-hlvkh" Nov 25 11:05:26 crc kubenswrapper[4769]: I1125 11:05:26.783630 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3166d521-e1d9-476b-9f16-23857ce560c9-utilities\") pod \"redhat-marketplace-hlvkh\" (UID: \"3166d521-e1d9-476b-9f16-23857ce560c9\") " pod="openshift-marketplace/redhat-marketplace-hlvkh" Nov 25 11:05:26 crc kubenswrapper[4769]: I1125 11:05:26.783770 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3166d521-e1d9-476b-9f16-23857ce560c9-catalog-content\") pod \"redhat-marketplace-hlvkh\" (UID: \"3166d521-e1d9-476b-9f16-23857ce560c9\") " pod="openshift-marketplace/redhat-marketplace-hlvkh" Nov 25 11:05:26 crc kubenswrapper[4769]: I1125 11:05:26.783889 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mqddp\" (UniqueName: \"kubernetes.io/projected/3166d521-e1d9-476b-9f16-23857ce560c9-kube-api-access-mqddp\") pod \"redhat-marketplace-hlvkh\" (UID: \"3166d521-e1d9-476b-9f16-23857ce560c9\") " pod="openshift-marketplace/redhat-marketplace-hlvkh" Nov 25 11:05:26 crc kubenswrapper[4769]: I1125 11:05:26.784288 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3166d521-e1d9-476b-9f16-23857ce560c9-utilities\") pod \"redhat-marketplace-hlvkh\" (UID: \"3166d521-e1d9-476b-9f16-23857ce560c9\") " pod="openshift-marketplace/redhat-marketplace-hlvkh" Nov 25 11:05:26 crc kubenswrapper[4769]: I1125 11:05:26.784378 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3166d521-e1d9-476b-9f16-23857ce560c9-catalog-content\") pod \"redhat-marketplace-hlvkh\" (UID: \"3166d521-e1d9-476b-9f16-23857ce560c9\") " pod="openshift-marketplace/redhat-marketplace-hlvkh" Nov 25 11:05:26 crc kubenswrapper[4769]: I1125 11:05:26.812069 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mqddp\" (UniqueName: \"kubernetes.io/projected/3166d521-e1d9-476b-9f16-23857ce560c9-kube-api-access-mqddp\") pod 
\"redhat-marketplace-hlvkh\" (UID: \"3166d521-e1d9-476b-9f16-23857ce560c9\") " pod="openshift-marketplace/redhat-marketplace-hlvkh" Nov 25 11:05:26 crc kubenswrapper[4769]: I1125 11:05:26.937806 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hlvkh" Nov 25 11:05:52 crc kubenswrapper[4769]: I1125 11:05:52.290742 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 11:05:52 crc kubenswrapper[4769]: I1125 11:05:52.291333 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 11:05:52 crc kubenswrapper[4769]: I1125 11:05:52.291379 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" Nov 25 11:05:52 crc kubenswrapper[4769]: I1125 11:05:52.292296 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a9023e5a831ec6d39937fcd0f7843ba8dde118a0ee018068576d8f5bb54ffae1"} pod="openshift-machine-config-operator/machine-config-daemon-98mzt" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 11:05:52 crc kubenswrapper[4769]: I1125 11:05:52.292352 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" containerID="cri-o://a9023e5a831ec6d39937fcd0f7843ba8dde118a0ee018068576d8f5bb54ffae1" gracePeriod=600 Nov 25 11:05:53 crc kubenswrapper[4769]: I1125 11:05:53.054275 4769 generic.go:334] "Generic (PLEG): container finished" podID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerID="a9023e5a831ec6d39937fcd0f7843ba8dde118a0ee018068576d8f5bb54ffae1" exitCode=0 Nov 25 11:05:53 crc kubenswrapper[4769]: I1125 11:05:53.054633 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" event={"ID":"d58c71b5-5dc4-45c1-9b58-9740a35d2256","Type":"ContainerDied","Data":"a9023e5a831ec6d39937fcd0f7843ba8dde118a0ee018068576d8f5bb54ffae1"} Nov 25 11:05:53 crc kubenswrapper[4769]: I1125 11:05:53.054677 4769 scope.go:117] "RemoveContainer" containerID="319debd1d2820bc9978952132ea229b634e7709f812ece9fe2a1e10c727e5d05" Nov 25 11:06:02 crc kubenswrapper[4769]: E1125 11:06:02.054004 4769 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified" Nov 25 11:06:02 crc kubenswrapper[4769]: E1125 11:06:02.058229 4769 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:tempest-tests-tempest-tests-runner,Image:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:test-operator-ephemeral-workdir,ReadOnly:false,MountPath:/var/lib/tempest,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-ephemeral-temporary,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/test_operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-logs,ReadOnly:false,MountPath:/var/lib/tempest/external_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/etc/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/var/lib/tempest/.config/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/etc/openstack/secure.yaml,SubPath:secure.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ca-certs,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ssh-key,ReadOnly:false,MountPath:/var/lib/tempest/id_ecdsa,SubPath:ssh_key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gs7sr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42480,RunAsNonRoot:*false,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:*true,RunAsGroup:*42480,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-custom-data-s0,},Optional:nil,},SecretRef:nil,},EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-env-vars-s0,},Optional:nil,},SecretRef:nil,},},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod tempest-tests-tempest_openstack(03f7a5db-5b5b-4107-ab58-1fc92bea67a1): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 11:06:02 crc kubenswrapper[4769]: E1125 11:06:02.060989 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/tempest-tests-tempest" 
podUID="03f7a5db-5b5b-4107-ab58-1fc92bea67a1" Nov 25 11:06:02 crc kubenswrapper[4769]: E1125 11:06:02.180003 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified\\\"\"" pod="openstack/tempest-tests-tempest" podUID="03f7a5db-5b5b-4107-ab58-1fc92bea67a1" Nov 25 11:06:03 crc kubenswrapper[4769]: I1125 11:06:03.093337 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-hlvkh"] Nov 25 11:06:03 crc kubenswrapper[4769]: I1125 11:06:03.191497 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hlvkh" event={"ID":"3166d521-e1d9-476b-9f16-23857ce560c9","Type":"ContainerStarted","Data":"31a8ec75a11460f0ad96c9707e38b15f47a26b28ab5f7bb6cb0d543b7505b2a7"} Nov 25 11:06:03 crc kubenswrapper[4769]: I1125 11:06:03.194054 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" event={"ID":"d58c71b5-5dc4-45c1-9b58-9740a35d2256","Type":"ContainerStarted","Data":"4b939fc00d006c5c582da7cfcaed5ec1363b4453901fa7b7866cf6b4ff5b14f5"} Nov 25 11:06:04 crc kubenswrapper[4769]: I1125 11:06:04.210401 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hlvkh" event={"ID":"3166d521-e1d9-476b-9f16-23857ce560c9","Type":"ContainerStarted","Data":"a2beb22c82b85a16f8a3591aa463de19ffd6ba1cb31c06262c4d741fa4bba273"} Nov 25 11:06:05 crc kubenswrapper[4769]: I1125 11:06:05.223412 4769 generic.go:334] "Generic (PLEG): container finished" podID="3166d521-e1d9-476b-9f16-23857ce560c9" containerID="a2beb22c82b85a16f8a3591aa463de19ffd6ba1cb31c06262c4d741fa4bba273" exitCode=0 Nov 25 11:06:05 crc kubenswrapper[4769]: I1125 11:06:05.223704 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hlvkh" event={"ID":"3166d521-e1d9-476b-9f16-23857ce560c9","Type":"ContainerDied","Data":"a2beb22c82b85a16f8a3591aa463de19ffd6ba1cb31c06262c4d741fa4bba273"} Nov 25 11:06:07 crc kubenswrapper[4769]: I1125 11:06:07.248766 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hlvkh" event={"ID":"3166d521-e1d9-476b-9f16-23857ce560c9","Type":"ContainerStarted","Data":"cd3013710b1a3a729fb965bc559a5263273e349fbed0b58b6a737bf421075fe9"} Nov 25 11:06:08 crc kubenswrapper[4769]: I1125 11:06:08.259317 4769 generic.go:334] "Generic (PLEG): container finished" podID="3166d521-e1d9-476b-9f16-23857ce560c9" containerID="cd3013710b1a3a729fb965bc559a5263273e349fbed0b58b6a737bf421075fe9" exitCode=0 Nov 25 11:06:08 crc kubenswrapper[4769]: I1125 11:06:08.259370 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hlvkh" event={"ID":"3166d521-e1d9-476b-9f16-23857ce560c9","Type":"ContainerDied","Data":"cd3013710b1a3a729fb965bc559a5263273e349fbed0b58b6a737bf421075fe9"} Nov 25 11:06:09 crc kubenswrapper[4769]: I1125 11:06:09.271740 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hlvkh" event={"ID":"3166d521-e1d9-476b-9f16-23857ce560c9","Type":"ContainerStarted","Data":"6a0e15bf741b01cd3873063d65682c0e1346abf1be3f3ab7e9f8bde43c587afd"} Nov 25 11:06:09 crc kubenswrapper[4769]: I1125 11:06:09.294264 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/redhat-marketplace-hlvkh" podStartSLOduration=39.691611795 podStartE2EDuration="43.29424017s" podCreationTimestamp="2025-11-25 11:05:26 +0000 UTC" firstStartedPulling="2025-11-25 11:06:05.228689243 +0000 UTC m=+4913.813661556" lastFinishedPulling="2025-11-25 11:06:08.831317618 +0000 UTC m=+4917.416289931" observedRunningTime="2025-11-25 11:06:09.287937311 +0000 UTC m=+4917.872909634" watchObservedRunningTime="2025-11-25 11:06:09.29424017 +0000 UTC m=+4917.879212483" Nov 25 11:06:14 crc kubenswrapper[4769]: I1125 11:06:14.305247 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Nov 25 11:06:16 crc kubenswrapper[4769]: I1125 11:06:16.938216 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-hlvkh" Nov 25 11:06:16 crc kubenswrapper[4769]: I1125 11:06:16.938572 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-hlvkh" Nov 25 11:06:16 crc kubenswrapper[4769]: I1125 11:06:16.991788 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-hlvkh" Nov 25 11:06:17 crc kubenswrapper[4769]: I1125 11:06:17.371038 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"03f7a5db-5b5b-4107-ab58-1fc92bea67a1","Type":"ContainerStarted","Data":"9d6770ac412bfcf02ae85d65be770e22456a557e0627cfad10dba0492dac096c"} Nov 25 11:06:17 crc kubenswrapper[4769]: I1125 11:06:17.400928 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tempest-tests-tempest" podStartSLOduration=5.235583661 podStartE2EDuration="1m24.400908538s" podCreationTimestamp="2025-11-25 11:04:53 +0000 UTC" firstStartedPulling="2025-11-25 11:04:55.137223324 +0000 UTC m=+4843.722195627" lastFinishedPulling="2025-11-25 11:06:14.302548191 +0000 UTC m=+4922.887520504" observedRunningTime="2025-11-25 11:06:17.389842322 +0000 UTC m=+4925.974814645" watchObservedRunningTime="2025-11-25 11:06:17.400908538 +0000 UTC m=+4925.985880861" Nov 25 11:06:17 crc kubenswrapper[4769]: I1125 11:06:17.443926 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-hlvkh" Nov 25 11:06:17 crc kubenswrapper[4769]: I1125 11:06:17.493837 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-hlvkh"] Nov 25 11:06:19 crc kubenswrapper[4769]: I1125 11:06:19.390623 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-hlvkh" podUID="3166d521-e1d9-476b-9f16-23857ce560c9" containerName="registry-server" containerID="cri-o://6a0e15bf741b01cd3873063d65682c0e1346abf1be3f3ab7e9f8bde43c587afd" gracePeriod=2 Nov 25 11:06:20 crc kubenswrapper[4769]: I1125 11:06:20.411482 4769 generic.go:334] "Generic (PLEG): container finished" podID="3166d521-e1d9-476b-9f16-23857ce560c9" containerID="6a0e15bf741b01cd3873063d65682c0e1346abf1be3f3ab7e9f8bde43c587afd" exitCode=0 Nov 25 11:06:20 crc kubenswrapper[4769]: I1125 11:06:20.411573 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hlvkh" event={"ID":"3166d521-e1d9-476b-9f16-23857ce560c9","Type":"ContainerDied","Data":"6a0e15bf741b01cd3873063d65682c0e1346abf1be3f3ab7e9f8bde43c587afd"} Nov 25 11:06:20 crc kubenswrapper[4769]: I1125 11:06:20.883470 4769 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hlvkh" Nov 25 11:06:21 crc kubenswrapper[4769]: I1125 11:06:21.085484 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3166d521-e1d9-476b-9f16-23857ce560c9-catalog-content\") pod \"3166d521-e1d9-476b-9f16-23857ce560c9\" (UID: \"3166d521-e1d9-476b-9f16-23857ce560c9\") " Nov 25 11:06:21 crc kubenswrapper[4769]: I1125 11:06:21.085532 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mqddp\" (UniqueName: \"kubernetes.io/projected/3166d521-e1d9-476b-9f16-23857ce560c9-kube-api-access-mqddp\") pod \"3166d521-e1d9-476b-9f16-23857ce560c9\" (UID: \"3166d521-e1d9-476b-9f16-23857ce560c9\") " Nov 25 11:06:21 crc kubenswrapper[4769]: I1125 11:06:21.085813 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3166d521-e1d9-476b-9f16-23857ce560c9-utilities\") pod \"3166d521-e1d9-476b-9f16-23857ce560c9\" (UID: \"3166d521-e1d9-476b-9f16-23857ce560c9\") " Nov 25 11:06:21 crc kubenswrapper[4769]: I1125 11:06:21.086798 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3166d521-e1d9-476b-9f16-23857ce560c9-utilities" (OuterVolumeSpecName: "utilities") pod "3166d521-e1d9-476b-9f16-23857ce560c9" (UID: "3166d521-e1d9-476b-9f16-23857ce560c9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:06:21 crc kubenswrapper[4769]: I1125 11:06:21.093307 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3166d521-e1d9-476b-9f16-23857ce560c9-kube-api-access-mqddp" (OuterVolumeSpecName: "kube-api-access-mqddp") pod "3166d521-e1d9-476b-9f16-23857ce560c9" (UID: "3166d521-e1d9-476b-9f16-23857ce560c9"). InnerVolumeSpecName "kube-api-access-mqddp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:06:21 crc kubenswrapper[4769]: I1125 11:06:21.109031 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3166d521-e1d9-476b-9f16-23857ce560c9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3166d521-e1d9-476b-9f16-23857ce560c9" (UID: "3166d521-e1d9-476b-9f16-23857ce560c9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:06:21 crc kubenswrapper[4769]: I1125 11:06:21.189659 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3166d521-e1d9-476b-9f16-23857ce560c9-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 11:06:21 crc kubenswrapper[4769]: I1125 11:06:21.189697 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3166d521-e1d9-476b-9f16-23857ce560c9-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 11:06:21 crc kubenswrapper[4769]: I1125 11:06:21.189711 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mqddp\" (UniqueName: \"kubernetes.io/projected/3166d521-e1d9-476b-9f16-23857ce560c9-kube-api-access-mqddp\") on node \"crc\" DevicePath \"\"" Nov 25 11:06:21 crc kubenswrapper[4769]: I1125 11:06:21.426095 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hlvkh" event={"ID":"3166d521-e1d9-476b-9f16-23857ce560c9","Type":"ContainerDied","Data":"31a8ec75a11460f0ad96c9707e38b15f47a26b28ab5f7bb6cb0d543b7505b2a7"} Nov 25 11:06:21 crc kubenswrapper[4769]: I1125 11:06:21.426409 4769 scope.go:117] "RemoveContainer" containerID="6a0e15bf741b01cd3873063d65682c0e1346abf1be3f3ab7e9f8bde43c587afd" Nov 25 11:06:21 crc kubenswrapper[4769]: I1125 11:06:21.426159 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hlvkh" Nov 25 11:06:21 crc kubenswrapper[4769]: I1125 11:06:21.465839 4769 scope.go:117] "RemoveContainer" containerID="cd3013710b1a3a729fb965bc559a5263273e349fbed0b58b6a737bf421075fe9" Nov 25 11:06:21 crc kubenswrapper[4769]: I1125 11:06:21.470533 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-hlvkh"] Nov 25 11:06:21 crc kubenswrapper[4769]: I1125 11:06:21.482928 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-hlvkh"] Nov 25 11:06:21 crc kubenswrapper[4769]: I1125 11:06:21.501830 4769 scope.go:117] "RemoveContainer" containerID="a2beb22c82b85a16f8a3591aa463de19ffd6ba1cb31c06262c4d741fa4bba273" Nov 25 11:06:22 crc kubenswrapper[4769]: I1125 11:06:22.250867 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3166d521-e1d9-476b-9f16-23857ce560c9" path="/var/lib/kubelet/pods/3166d521-e1d9-476b-9f16-23857ce560c9/volumes" Nov 25 11:07:21 crc kubenswrapper[4769]: I1125 11:07:21.518879 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 25 11:07:21 crc kubenswrapper[4769]: E1125 11:07:21.582877 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3166d521-e1d9-476b-9f16-23857ce560c9" containerName="extract-utilities" Nov 25 11:07:21 crc kubenswrapper[4769]: I1125 11:07:21.582915 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="3166d521-e1d9-476b-9f16-23857ce560c9" containerName="extract-utilities" Nov 25 11:07:21 crc kubenswrapper[4769]: E1125 11:07:21.582935 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3166d521-e1d9-476b-9f16-23857ce560c9" containerName="extract-content" Nov 25 11:07:21 crc kubenswrapper[4769]: I1125 11:07:21.582945 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="3166d521-e1d9-476b-9f16-23857ce560c9" containerName="extract-content" Nov 25 11:07:21 crc kubenswrapper[4769]: E1125 11:07:21.583001 4769 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3166d521-e1d9-476b-9f16-23857ce560c9" containerName="registry-server" Nov 25 11:07:21 crc kubenswrapper[4769]: I1125 11:07:21.583017 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="3166d521-e1d9-476b-9f16-23857ce560c9" containerName="registry-server" Nov 25 11:07:21 crc kubenswrapper[4769]: I1125 11:07:21.583369 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="3166d521-e1d9-476b-9f16-23857ce560c9" containerName="registry-server" Nov 25 11:07:21 crc kubenswrapper[4769]: I1125 11:07:21.585227 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 11:07:21 crc kubenswrapper[4769]: I1125 11:07:21.587383 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 25 11:07:21 crc kubenswrapper[4769]: I1125 11:07:21.589381 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 25 11:07:21 crc kubenswrapper[4769]: I1125 11:07:21.605425 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 25 11:07:21 crc kubenswrapper[4769]: I1125 11:07:21.748514 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/64ee4f63-4adc-44d0-939c-8ae692d4fce3-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"64ee4f63-4adc-44d0-939c-8ae692d4fce3\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 11:07:21 crc kubenswrapper[4769]: I1125 11:07:21.748593 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/64ee4f63-4adc-44d0-939c-8ae692d4fce3-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"64ee4f63-4adc-44d0-939c-8ae692d4fce3\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 11:07:21 crc kubenswrapper[4769]: I1125 11:07:21.851526 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/64ee4f63-4adc-44d0-939c-8ae692d4fce3-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"64ee4f63-4adc-44d0-939c-8ae692d4fce3\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 11:07:21 crc kubenswrapper[4769]: I1125 11:07:21.851587 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/64ee4f63-4adc-44d0-939c-8ae692d4fce3-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"64ee4f63-4adc-44d0-939c-8ae692d4fce3\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 11:07:21 crc kubenswrapper[4769]: I1125 11:07:21.851843 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/64ee4f63-4adc-44d0-939c-8ae692d4fce3-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"64ee4f63-4adc-44d0-939c-8ae692d4fce3\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 11:07:21 crc kubenswrapper[4769]: I1125 11:07:21.871232 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/64ee4f63-4adc-44d0-939c-8ae692d4fce3-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: 
\"64ee4f63-4adc-44d0-939c-8ae692d4fce3\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 11:07:21 crc kubenswrapper[4769]: I1125 11:07:21.912509 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 11:07:22 crc kubenswrapper[4769]: I1125 11:07:22.623873 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 25 11:07:23 crc kubenswrapper[4769]: I1125 11:07:23.113663 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"64ee4f63-4adc-44d0-939c-8ae692d4fce3","Type":"ContainerStarted","Data":"ad30877a0fbf3f5033c89707c91248134e591b826ba1884e56f0d1cfcebd791a"} Nov 25 11:07:24 crc kubenswrapper[4769]: I1125 11:07:24.126132 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"64ee4f63-4adc-44d0-939c-8ae692d4fce3","Type":"ContainerStarted","Data":"9417aec1160f5c645664fde72e24de274df5fe54880d43b9fe67536a6704e3ca"} Nov 25 11:07:24 crc kubenswrapper[4769]: I1125 11:07:24.157663 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-9-crc" podStartSLOduration=3.157637234 podStartE2EDuration="3.157637234s" podCreationTimestamp="2025-11-25 11:07:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 11:07:24.140456294 +0000 UTC m=+4992.725428607" watchObservedRunningTime="2025-11-25 11:07:24.157637234 +0000 UTC m=+4992.742609557" Nov 25 11:07:26 crc kubenswrapper[4769]: I1125 11:07:26.147884 4769 generic.go:334] "Generic (PLEG): container finished" podID="64ee4f63-4adc-44d0-939c-8ae692d4fce3" containerID="9417aec1160f5c645664fde72e24de274df5fe54880d43b9fe67536a6704e3ca" exitCode=0 Nov 25 11:07:26 crc kubenswrapper[4769]: I1125 11:07:26.148357 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"64ee4f63-4adc-44d0-939c-8ae692d4fce3","Type":"ContainerDied","Data":"9417aec1160f5c645664fde72e24de274df5fe54880d43b9fe67536a6704e3ca"} Nov 25 11:07:27 crc kubenswrapper[4769]: I1125 11:07:27.815941 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 11:07:27 crc kubenswrapper[4769]: I1125 11:07:27.913319 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/64ee4f63-4adc-44d0-939c-8ae692d4fce3-kubelet-dir\") pod \"64ee4f63-4adc-44d0-939c-8ae692d4fce3\" (UID: \"64ee4f63-4adc-44d0-939c-8ae692d4fce3\") " Nov 25 11:07:27 crc kubenswrapper[4769]: I1125 11:07:27.913413 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/64ee4f63-4adc-44d0-939c-8ae692d4fce3-kube-api-access\") pod \"64ee4f63-4adc-44d0-939c-8ae692d4fce3\" (UID: \"64ee4f63-4adc-44d0-939c-8ae692d4fce3\") " Nov 25 11:07:27 crc kubenswrapper[4769]: I1125 11:07:27.913436 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/64ee4f63-4adc-44d0-939c-8ae692d4fce3-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "64ee4f63-4adc-44d0-939c-8ae692d4fce3" (UID: "64ee4f63-4adc-44d0-939c-8ae692d4fce3"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 11:07:27 crc kubenswrapper[4769]: I1125 11:07:27.914016 4769 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/64ee4f63-4adc-44d0-939c-8ae692d4fce3-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 25 11:07:28 crc kubenswrapper[4769]: I1125 11:07:28.045614 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/64ee4f63-4adc-44d0-939c-8ae692d4fce3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "64ee4f63-4adc-44d0-939c-8ae692d4fce3" (UID: "64ee4f63-4adc-44d0-939c-8ae692d4fce3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:07:28 crc kubenswrapper[4769]: I1125 11:07:28.119018 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/64ee4f63-4adc-44d0-939c-8ae692d4fce3-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 11:07:28 crc kubenswrapper[4769]: I1125 11:07:28.180651 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"64ee4f63-4adc-44d0-939c-8ae692d4fce3","Type":"ContainerDied","Data":"ad30877a0fbf3f5033c89707c91248134e591b826ba1884e56f0d1cfcebd791a"} Nov 25 11:07:28 crc kubenswrapper[4769]: I1125 11:07:28.180693 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ad30877a0fbf3f5033c89707c91248134e591b826ba1884e56f0d1cfcebd791a" Nov 25 11:07:28 crc kubenswrapper[4769]: I1125 11:07:28.180745 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 11:07:28 crc kubenswrapper[4769]: I1125 11:07:28.712571 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 25 11:07:28 crc kubenswrapper[4769]: E1125 11:07:28.713092 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64ee4f63-4adc-44d0-939c-8ae692d4fce3" containerName="pruner" Nov 25 11:07:28 crc kubenswrapper[4769]: I1125 11:07:28.713110 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="64ee4f63-4adc-44d0-939c-8ae692d4fce3" containerName="pruner" Nov 25 11:07:28 crc kubenswrapper[4769]: I1125 11:07:28.713343 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="64ee4f63-4adc-44d0-939c-8ae692d4fce3" containerName="pruner" Nov 25 11:07:28 crc kubenswrapper[4769]: I1125 11:07:28.714247 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 25 11:07:28 crc kubenswrapper[4769]: I1125 11:07:28.717267 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 25 11:07:28 crc kubenswrapper[4769]: I1125 11:07:28.717471 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 25 11:07:28 crc kubenswrapper[4769]: I1125 11:07:28.741237 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 25 11:07:28 crc kubenswrapper[4769]: I1125 11:07:28.842060 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7264b0b4-0f02-4841-9b8a-5f247b3c42f1-kube-api-access\") pod \"installer-9-crc\" (UID: \"7264b0b4-0f02-4841-9b8a-5f247b3c42f1\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 11:07:28 crc kubenswrapper[4769]: I1125 11:07:28.842119 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/7264b0b4-0f02-4841-9b8a-5f247b3c42f1-var-lock\") pod \"installer-9-crc\" (UID: \"7264b0b4-0f02-4841-9b8a-5f247b3c42f1\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 11:07:28 crc kubenswrapper[4769]: I1125 11:07:28.842179 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7264b0b4-0f02-4841-9b8a-5f247b3c42f1-kubelet-dir\") pod \"installer-9-crc\" (UID: \"7264b0b4-0f02-4841-9b8a-5f247b3c42f1\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 11:07:28 crc kubenswrapper[4769]: I1125 11:07:28.943905 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7264b0b4-0f02-4841-9b8a-5f247b3c42f1-kubelet-dir\") pod \"installer-9-crc\" (UID: \"7264b0b4-0f02-4841-9b8a-5f247b3c42f1\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 11:07:28 crc kubenswrapper[4769]: I1125 11:07:28.944020 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7264b0b4-0f02-4841-9b8a-5f247b3c42f1-kubelet-dir\") pod \"installer-9-crc\" (UID: \"7264b0b4-0f02-4841-9b8a-5f247b3c42f1\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 11:07:28 crc kubenswrapper[4769]: I1125 11:07:28.944163 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7264b0b4-0f02-4841-9b8a-5f247b3c42f1-kube-api-access\") pod \"installer-9-crc\" (UID: \"7264b0b4-0f02-4841-9b8a-5f247b3c42f1\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 11:07:28 crc kubenswrapper[4769]: I1125 11:07:28.944184 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/7264b0b4-0f02-4841-9b8a-5f247b3c42f1-var-lock\") pod \"installer-9-crc\" (UID: \"7264b0b4-0f02-4841-9b8a-5f247b3c42f1\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 11:07:28 crc kubenswrapper[4769]: I1125 11:07:28.944254 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/7264b0b4-0f02-4841-9b8a-5f247b3c42f1-var-lock\") pod \"installer-9-crc\" (UID: 
\"7264b0b4-0f02-4841-9b8a-5f247b3c42f1\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 11:07:28 crc kubenswrapper[4769]: I1125 11:07:28.965091 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7264b0b4-0f02-4841-9b8a-5f247b3c42f1-kube-api-access\") pod \"installer-9-crc\" (UID: \"7264b0b4-0f02-4841-9b8a-5f247b3c42f1\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 11:07:29 crc kubenswrapper[4769]: I1125 11:07:29.045267 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 25 11:07:29 crc kubenswrapper[4769]: I1125 11:07:29.563704 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 25 11:07:30 crc kubenswrapper[4769]: I1125 11:07:30.208151 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"7264b0b4-0f02-4841-9b8a-5f247b3c42f1","Type":"ContainerStarted","Data":"f5b7a34d3a0141fc1bfd57cf063dbc88e4b94155876890d7746804ea9beb27ca"} Nov 25 11:07:30 crc kubenswrapper[4769]: I1125 11:07:30.208707 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"7264b0b4-0f02-4841-9b8a-5f247b3c42f1","Type":"ContainerStarted","Data":"0f087157b1cb41aed36c03ef2d9225161d64f8bb02b5846eb1e7a3262c2e2374"} Nov 25 11:07:30 crc kubenswrapper[4769]: I1125 11:07:30.228080 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=2.228041026 podStartE2EDuration="2.228041026s" podCreationTimestamp="2025-11-25 11:07:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 11:07:30.222863027 +0000 UTC m=+4998.807835340" watchObservedRunningTime="2025-11-25 11:07:30.228041026 +0000 UTC m=+4998.813013339" Nov 25 11:07:32 crc kubenswrapper[4769]: I1125 11:07:32.079341 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-59lwg"] Nov 25 11:07:32 crc kubenswrapper[4769]: I1125 11:07:32.082145 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-59lwg" Nov 25 11:07:32 crc kubenswrapper[4769]: I1125 11:07:32.115299 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-59lwg"] Nov 25 11:07:32 crc kubenswrapper[4769]: I1125 11:07:32.125805 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/65d1fe44-2bb2-4d58-8bcf-7e15ab70164c-catalog-content\") pod \"community-operators-59lwg\" (UID: \"65d1fe44-2bb2-4d58-8bcf-7e15ab70164c\") " pod="openshift-marketplace/community-operators-59lwg" Nov 25 11:07:32 crc kubenswrapper[4769]: I1125 11:07:32.126150 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6jfg8\" (UniqueName: \"kubernetes.io/projected/65d1fe44-2bb2-4d58-8bcf-7e15ab70164c-kube-api-access-6jfg8\") pod \"community-operators-59lwg\" (UID: \"65d1fe44-2bb2-4d58-8bcf-7e15ab70164c\") " pod="openshift-marketplace/community-operators-59lwg" Nov 25 11:07:32 crc kubenswrapper[4769]: I1125 11:07:32.126213 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/65d1fe44-2bb2-4d58-8bcf-7e15ab70164c-utilities\") pod \"community-operators-59lwg\" (UID: \"65d1fe44-2bb2-4d58-8bcf-7e15ab70164c\") " pod="openshift-marketplace/community-operators-59lwg" Nov 25 11:07:32 crc kubenswrapper[4769]: I1125 11:07:32.228540 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6jfg8\" (UniqueName: \"kubernetes.io/projected/65d1fe44-2bb2-4d58-8bcf-7e15ab70164c-kube-api-access-6jfg8\") pod \"community-operators-59lwg\" (UID: \"65d1fe44-2bb2-4d58-8bcf-7e15ab70164c\") " pod="openshift-marketplace/community-operators-59lwg" Nov 25 11:07:32 crc kubenswrapper[4769]: I1125 11:07:32.228623 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/65d1fe44-2bb2-4d58-8bcf-7e15ab70164c-utilities\") pod \"community-operators-59lwg\" (UID: \"65d1fe44-2bb2-4d58-8bcf-7e15ab70164c\") " pod="openshift-marketplace/community-operators-59lwg" Nov 25 11:07:32 crc kubenswrapper[4769]: I1125 11:07:32.228699 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/65d1fe44-2bb2-4d58-8bcf-7e15ab70164c-catalog-content\") pod \"community-operators-59lwg\" (UID: \"65d1fe44-2bb2-4d58-8bcf-7e15ab70164c\") " pod="openshift-marketplace/community-operators-59lwg" Nov 25 11:07:32 crc kubenswrapper[4769]: I1125 11:07:32.229382 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/65d1fe44-2bb2-4d58-8bcf-7e15ab70164c-catalog-content\") pod \"community-operators-59lwg\" (UID: \"65d1fe44-2bb2-4d58-8bcf-7e15ab70164c\") " pod="openshift-marketplace/community-operators-59lwg" Nov 25 11:07:32 crc kubenswrapper[4769]: I1125 11:07:32.229404 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/65d1fe44-2bb2-4d58-8bcf-7e15ab70164c-utilities\") pod \"community-operators-59lwg\" (UID: \"65d1fe44-2bb2-4d58-8bcf-7e15ab70164c\") " pod="openshift-marketplace/community-operators-59lwg" Nov 25 11:07:32 crc kubenswrapper[4769]: I1125 11:07:32.252860 4769 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-6jfg8\" (UniqueName: \"kubernetes.io/projected/65d1fe44-2bb2-4d58-8bcf-7e15ab70164c-kube-api-access-6jfg8\") pod \"community-operators-59lwg\" (UID: \"65d1fe44-2bb2-4d58-8bcf-7e15ab70164c\") " pod="openshift-marketplace/community-operators-59lwg" Nov 25 11:07:32 crc kubenswrapper[4769]: I1125 11:07:32.456890 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-59lwg" Nov 25 11:07:32 crc kubenswrapper[4769]: I1125 11:07:32.987111 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-59lwg"] Nov 25 11:07:33 crc kubenswrapper[4769]: I1125 11:07:33.241701 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-59lwg" event={"ID":"65d1fe44-2bb2-4d58-8bcf-7e15ab70164c","Type":"ContainerStarted","Data":"c9973f80bdb330b9aa618c8a1e0c1373807dcd679e372630c89497be947d1d91"} Nov 25 11:07:34 crc kubenswrapper[4769]: I1125 11:07:34.270349 4769 generic.go:334] "Generic (PLEG): container finished" podID="65d1fe44-2bb2-4d58-8bcf-7e15ab70164c" containerID="c3f13af9127764c58d2fdbbe10049fc30f347ca528920d0e5f4a8ef419174def" exitCode=0 Nov 25 11:07:34 crc kubenswrapper[4769]: I1125 11:07:34.270439 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-59lwg" event={"ID":"65d1fe44-2bb2-4d58-8bcf-7e15ab70164c","Type":"ContainerDied","Data":"c3f13af9127764c58d2fdbbe10049fc30f347ca528920d0e5f4a8ef419174def"} Nov 25 11:07:36 crc kubenswrapper[4769]: I1125 11:07:36.299834 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-59lwg" event={"ID":"65d1fe44-2bb2-4d58-8bcf-7e15ab70164c","Type":"ContainerStarted","Data":"4ffaa6f5d5b58fb9d51c7e199e37d73b9de720dc76f1d5a84234ed5086c3bfbe"} Nov 25 11:07:45 crc kubenswrapper[4769]: I1125 11:07:45.405114 4769 generic.go:334] "Generic (PLEG): container finished" podID="65d1fe44-2bb2-4d58-8bcf-7e15ab70164c" containerID="4ffaa6f5d5b58fb9d51c7e199e37d73b9de720dc76f1d5a84234ed5086c3bfbe" exitCode=0 Nov 25 11:07:45 crc kubenswrapper[4769]: I1125 11:07:45.405196 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-59lwg" event={"ID":"65d1fe44-2bb2-4d58-8bcf-7e15ab70164c","Type":"ContainerDied","Data":"4ffaa6f5d5b58fb9d51c7e199e37d73b9de720dc76f1d5a84234ed5086c3bfbe"} Nov 25 11:07:49 crc kubenswrapper[4769]: I1125 11:07:49.596401 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-59lwg" event={"ID":"65d1fe44-2bb2-4d58-8bcf-7e15ab70164c","Type":"ContainerStarted","Data":"76a315f23d8193ceea354b8a1239cea5998d44c943d0ca8e2897621caea918a9"} Nov 25 11:07:49 crc kubenswrapper[4769]: I1125 11:07:49.618662 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-59lwg" podStartSLOduration=3.521351577 podStartE2EDuration="17.618647s" podCreationTimestamp="2025-11-25 11:07:32 +0000 UTC" firstStartedPulling="2025-11-25 11:07:34.272429626 +0000 UTC m=+5002.857401939" lastFinishedPulling="2025-11-25 11:07:48.369725049 +0000 UTC m=+5016.954697362" observedRunningTime="2025-11-25 11:07:49.61754117 +0000 UTC m=+5018.202513493" watchObservedRunningTime="2025-11-25 11:07:49.618647 +0000 UTC m=+5018.203619313" Nov 25 11:07:52 crc kubenswrapper[4769]: I1125 11:07:52.457894 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/community-operators-59lwg" Nov 25 11:07:52 crc kubenswrapper[4769]: I1125 11:07:52.458807 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-59lwg" Nov 25 11:07:53 crc kubenswrapper[4769]: I1125 11:07:53.519683 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-59lwg" podUID="65d1fe44-2bb2-4d58-8bcf-7e15ab70164c" containerName="registry-server" probeResult="failure" output=< Nov 25 11:07:53 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:07:53 crc kubenswrapper[4769]: > Nov 25 11:08:03 crc kubenswrapper[4769]: I1125 11:08:03.517515 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-59lwg" podUID="65d1fe44-2bb2-4d58-8bcf-7e15ab70164c" containerName="registry-server" probeResult="failure" output=< Nov 25 11:08:03 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:08:03 crc kubenswrapper[4769]: > Nov 25 11:08:07 crc kubenswrapper[4769]: I1125 11:08:07.808065 4769 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 25 11:08:07 crc kubenswrapper[4769]: I1125 11:08:07.808758 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96" gracePeriod=15 Nov 25 11:08:07 crc kubenswrapper[4769]: I1125 11:08:07.808775 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469" gracePeriod=15 Nov 25 11:08:07 crc kubenswrapper[4769]: I1125 11:08:07.808846 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a" gracePeriod=15 Nov 25 11:08:07 crc kubenswrapper[4769]: I1125 11:08:07.808842 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24" gracePeriod=15 Nov 25 11:08:07 crc kubenswrapper[4769]: I1125 11:08:07.808921 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291" gracePeriod=15 Nov 25 11:08:07 crc kubenswrapper[4769]: I1125 11:08:07.811114 4769 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 25 11:08:07 crc kubenswrapper[4769]: E1125 11:08:07.811748 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 
Nov 25 11:08:07 crc kubenswrapper[4769]: I1125 11:08:07.811772 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller"
Nov 25 11:08:07 crc kubenswrapper[4769]: E1125 11:08:07.811803 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer"
Nov 25 11:08:07 crc kubenswrapper[4769]: I1125 11:08:07.811811 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer"
Nov 25 11:08:07 crc kubenswrapper[4769]: E1125 11:08:07.811825 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup"
Nov 25 11:08:07 crc kubenswrapper[4769]: I1125 11:08:07.811833 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup"
Nov 25 11:08:07 crc kubenswrapper[4769]: E1125 11:08:07.811848 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver"
Nov 25 11:08:07 crc kubenswrapper[4769]: I1125 11:08:07.811857 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver"
Nov 25 11:08:07 crc kubenswrapper[4769]: E1125 11:08:07.811889 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Nov 25 11:08:07 crc kubenswrapper[4769]: I1125 11:08:07.811897 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Nov 25 11:08:07 crc kubenswrapper[4769]: E1125 11:08:07.811914 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz"
Nov 25 11:08:07 crc kubenswrapper[4769]: I1125 11:08:07.811921 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz"
Nov 25 11:08:07 crc kubenswrapper[4769]: I1125 11:08:07.812227 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Nov 25 11:08:07 crc kubenswrapper[4769]: I1125 11:08:07.812252 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Nov 25 11:08:07 crc kubenswrapper[4769]: I1125 11:08:07.812274 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer"
Nov 25 11:08:07 crc kubenswrapper[4769]: I1125 11:08:07.812288 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz"
Nov 25 11:08:07 crc kubenswrapper[4769]: I1125 11:08:07.812310 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver"
Nov 25 11:08:07 crc kubenswrapper[4769]: I1125 11:08:07.812320 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller"
Nov 25 11:08:07 crc kubenswrapper[4769]: E1125 11:08:07.812587 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Nov 25 11:08:07 crc kubenswrapper[4769]: I1125 11:08:07.812597 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Nov 25 11:08:07 crc kubenswrapper[4769]: I1125 11:08:07.817529 4769 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Nov 25 11:08:07 crc kubenswrapper[4769]: I1125 11:08:07.818944 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 11:08:07 crc kubenswrapper[4769]: I1125 11:08:07.849624 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 11:08:07 crc kubenswrapper[4769]: I1125 11:08:07.849724 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 11:08:07 crc kubenswrapper[4769]: I1125 11:08:07.850190 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 11:08:07 crc kubenswrapper[4769]: I1125 11:08:07.837929 4769 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="f4b27818a5e8e43d0dc095d08835c792" podUID="71bb4a3aecc4ba5b26c4b7318770ce13"
Nov 25 11:08:07 crc kubenswrapper[4769]: I1125 11:08:07.919772 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Nov 25 11:08:07 crc kubenswrapper[4769]: I1125 11:08:07.952881 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 11:08:07 crc kubenswrapper[4769]: I1125 11:08:07.952944 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 11:08:07 crc kubenswrapper[4769]: I1125 11:08:07.952998 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 11:08:07 crc kubenswrapper[4769]: I1125 11:08:07.953018 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 11:08:07 crc kubenswrapper[4769]: I1125 11:08:07.953063 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 11:08:07 crc kubenswrapper[4769]: I1125 11:08:07.953131 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 11:08:07 crc kubenswrapper[4769]: I1125 11:08:07.953158 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 11:08:07 crc kubenswrapper[4769]: I1125 11:08:07.953210 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 11:08:07 crc kubenswrapper[4769]: I1125 11:08:07.953371 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 11:08:07 crc kubenswrapper[4769]: I1125 11:08:07.953408 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 11:08:07 crc kubenswrapper[4769]: I1125 11:08:07.953447 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 11:08:08 crc kubenswrapper[4769]: I1125 11:08:08.056135 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 11:08:08 crc kubenswrapper[4769]: I1125 11:08:08.056459 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 11:08:08 crc kubenswrapper[4769]: I1125 11:08:08.056618 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 11:08:08 crc kubenswrapper[4769]: I1125 11:08:08.056738 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 11:08:08 crc kubenswrapper[4769]: I1125 11:08:08.056775 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 11:08:08 crc kubenswrapper[4769]: I1125 11:08:08.056787 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 11:08:08 crc kubenswrapper[4769]: I1125 11:08:08.056824 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 11:08:08 crc kubenswrapper[4769]: I1125 11:08:08.056843 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 11:08:08 crc kubenswrapper[4769]: I1125 11:08:08.056903 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 11:08:08 crc kubenswrapper[4769]: I1125 11:08:08.057003 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 11:08:08 crc kubenswrapper[4769]: I1125 11:08:08.193097 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 11:08:08 crc kubenswrapper[4769]: E1125 11:08:08.247171 4769 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.201:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187b3b4eb2d0ba88 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 11:08:08.239192712 +0000 UTC m=+5036.824165025,LastTimestamp:2025-11-25 11:08:08.239192712 +0000 UTC m=+5036.824165025,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 25 11:08:08 crc kubenswrapper[4769]: E1125 11:08:08.341129 4769 desired_state_of_world_populator.go:312] "Error processing volume" err="error processing PVC openstack/prometheus-metric-storage-db-prometheus-metric-storage-0: failed to fetch PVC from API server: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/persistentvolumeclaims/prometheus-metric-storage-db-prometheus-metric-storage-0\": dial tcp 38.102.83.201:6443: connect: connection refused" pod="openstack/prometheus-metric-storage-0" volumeName="prometheus-metric-storage-db"
Nov 25 11:08:08 crc kubenswrapper[4769]: E1125 11:08:08.496961 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T11:08:08Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T11:08:08Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T11:08:08Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T11:08:08Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Patch \"https://api-int.crc.testing:6443/api/v1/nodes/crc/status?timeout=10s\": dial tcp 38.102.83.201:6443: connect: connection refused"
Nov 25 11:08:08 crc kubenswrapper[4769]: E1125 11:08:08.499662 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.201:6443: connect: connection refused"
Nov 25 11:08:08 crc kubenswrapper[4769]: E1125 11:08:08.500103 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.201:6443: connect: connection refused"
Nov 25 11:08:08 crc kubenswrapper[4769]: E1125 11:08:08.500348 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.201:6443: connect: connection refused"
Nov 25 11:08:08 crc kubenswrapper[4769]: E1125 11:08:08.500595 4769 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.201:6443: connect: connection refused"
Nov 25 11:08:08 crc kubenswrapper[4769]: E1125 11:08:08.500614 4769 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count"
Nov 25 11:08:08 crc kubenswrapper[4769]: I1125 11:08:08.866131 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Nov 25 11:08:08 crc kubenswrapper[4769]: I1125 11:08:08.870446 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log"
Nov 25 11:08:08 crc kubenswrapper[4769]: I1125 11:08:08.872758 4769 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469" exitCode=0
Nov 25 11:08:08 crc kubenswrapper[4769]: I1125 11:08:08.872795 4769 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291" exitCode=0
Nov 25 11:08:08 crc kubenswrapper[4769]: I1125 11:08:08.872806 4769 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24" exitCode=0
Nov 25 11:08:08 crc kubenswrapper[4769]: I1125 11:08:08.872814 4769 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a" exitCode=2
Nov 25 11:08:08 crc kubenswrapper[4769]: I1125 11:08:08.872881 4769 scope.go:117] "RemoveContainer" containerID="054ea92ef7971cc96ba6f1c63ea5c25b4d411f498f0a038e4ff232cde1bfdf07"
Nov 25 11:08:08 crc kubenswrapper[4769]: I1125 11:08:08.875225 4769 generic.go:334] "Generic (PLEG): container finished" podID="7264b0b4-0f02-4841-9b8a-5f247b3c42f1" containerID="f5b7a34d3a0141fc1bfd57cf063dbc88e4b94155876890d7746804ea9beb27ca" exitCode=0
Nov 25 11:08:08 crc kubenswrapper[4769]: I1125 11:08:08.875314 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"7264b0b4-0f02-4841-9b8a-5f247b3c42f1","Type":"ContainerDied","Data":"f5b7a34d3a0141fc1bfd57cf063dbc88e4b94155876890d7746804ea9beb27ca"}
Nov 25 11:08:08 crc kubenswrapper[4769]: I1125 11:08:08.876364 4769 status_manager.go:851] "Failed to get status for pod" podUID="7264b0b4-0f02-4841-9b8a-5f247b3c42f1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.201:6443: connect: connection refused"
Nov 25 11:08:08 crc kubenswrapper[4769]: I1125 11:08:08.876701 4769 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.201:6443: connect: connection refused"
Nov 25 11:08:08 crc kubenswrapper[4769]: I1125 11:08:08.877767 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"08c10fa3fd260fed5e22367d730a2ad2c2029b34578d012f2e114280cb9bd700"}
Nov 25 11:08:08 crc kubenswrapper[4769]: I1125 11:08:08.877810 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"0a11255edf71f9f090a4680b80078c916d53329ced82cc2812d602bf145d01c2"}
Nov 25 11:08:08 crc kubenswrapper[4769]: I1125 11:08:08.878648 4769 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.201:6443: connect: connection refused"
Nov 25 11:08:08 crc kubenswrapper[4769]: I1125 11:08:08.878947 4769 status_manager.go:851] "Failed to get status for pod" podUID="7264b0b4-0f02-4841-9b8a-5f247b3c42f1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.201:6443: connect: connection refused"
Nov 25 11:08:08 crc kubenswrapper[4769]: E1125 11:08:08.896464 4769 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.201:6443: connect: connection refused"
Nov 25 11:08:08 crc kubenswrapper[4769]: E1125 11:08:08.896937 4769 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.201:6443: connect: connection refused"
Nov 25 11:08:08 crc kubenswrapper[4769]: E1125 11:08:08.897366 4769 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.201:6443: connect: connection refused"
Nov 25 11:08:08 crc kubenswrapper[4769]: E1125 11:08:08.897946 4769 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.201:6443: connect: connection refused"
Nov 25 11:08:08 crc kubenswrapper[4769]: E1125 11:08:08.898290 4769 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.201:6443: connect: connection refused"
Nov 25 11:08:08 crc kubenswrapper[4769]: I1125 11:08:08.898329 4769 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease"
Nov 25 11:08:08 crc kubenswrapper[4769]: E1125 11:08:08.899466 4769 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.201:6443: connect: connection refused" interval="200ms"
Nov 25 11:08:09 crc kubenswrapper[4769]: E1125 11:08:09.100594 4769 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.201:6443: connect: connection refused" interval="400ms"
Nov 25 11:08:09 crc kubenswrapper[4769]: E1125 11:08:09.502346 4769 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.201:6443: connect: connection refused" interval="800ms"
Nov 25 11:08:09 crc kubenswrapper[4769]: I1125 11:08:09.895943 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log"
Nov 25 11:08:10 crc kubenswrapper[4769]: E1125 11:08:10.306357 4769 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.201:6443: connect: connection refused" interval="1.6s"
Nov 25 11:08:10 crc kubenswrapper[4769]: I1125 11:08:10.912307 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log"
Nov 25 11:08:10 crc kubenswrapper[4769]: I1125 11:08:10.913271 4769 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96" exitCode=0
Nov 25 11:08:11 crc kubenswrapper[4769]: E1125 11:08:11.325458 4769 desired_state_of_world_populator.go:312] "Error processing volume" err="error processing PVC openstack/ovndbcluster-nb-etc-ovn-ovsdbserver-nb-0: failed to fetch PVC from API server: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/persistentvolumeclaims/ovndbcluster-nb-etc-ovn-ovsdbserver-nb-0\": dial tcp 38.102.83.201:6443: connect: connection refused" pod="openstack/ovsdbserver-nb-0" volumeName="ovndbcluster-nb-etc-ovn"
Nov 25 11:08:11 crc kubenswrapper[4769]: I1125 11:08:11.544247 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 25 11:08:11 crc kubenswrapper[4769]: I1125 11:08:11.545174 4769 status_manager.go:851] "Failed to get status for pod" podUID="7264b0b4-0f02-4841-9b8a-5f247b3c42f1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.201:6443: connect: connection refused" Nov 25 11:08:11 crc kubenswrapper[4769]: I1125 11:08:11.545485 4769 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.201:6443: connect: connection refused" Nov 25 11:08:11 crc kubenswrapper[4769]: I1125 11:08:11.557508 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 25 11:08:11 crc kubenswrapper[4769]: I1125 11:08:11.559027 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 11:08:11 crc kubenswrapper[4769]: I1125 11:08:11.560337 4769 status_manager.go:851] "Failed to get status for pod" podUID="7264b0b4-0f02-4841-9b8a-5f247b3c42f1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.201:6443: connect: connection refused" Nov 25 11:08:11 crc kubenswrapper[4769]: I1125 11:08:11.560838 4769 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.201:6443: connect: connection refused" Nov 25 11:08:11 crc kubenswrapper[4769]: I1125 11:08:11.561117 4769 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.201:6443: connect: connection refused" Nov 25 11:08:11 crc kubenswrapper[4769]: I1125 11:08:11.730637 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7264b0b4-0f02-4841-9b8a-5f247b3c42f1-kube-api-access\") pod \"7264b0b4-0f02-4841-9b8a-5f247b3c42f1\" (UID: \"7264b0b4-0f02-4841-9b8a-5f247b3c42f1\") " Nov 25 11:08:11 crc kubenswrapper[4769]: I1125 11:08:11.731009 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7264b0b4-0f02-4841-9b8a-5f247b3c42f1-kubelet-dir\") pod \"7264b0b4-0f02-4841-9b8a-5f247b3c42f1\" (UID: \"7264b0b4-0f02-4841-9b8a-5f247b3c42f1\") " Nov 25 11:08:11 crc kubenswrapper[4769]: I1125 11:08:11.731124 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 25 11:08:11 crc 
kubenswrapper[4769]: I1125 11:08:11.731161 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7264b0b4-0f02-4841-9b8a-5f247b3c42f1-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "7264b0b4-0f02-4841-9b8a-5f247b3c42f1" (UID: "7264b0b4-0f02-4841-9b8a-5f247b3c42f1"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 11:08:11 crc kubenswrapper[4769]: I1125 11:08:11.731201 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/7264b0b4-0f02-4841-9b8a-5f247b3c42f1-var-lock\") pod \"7264b0b4-0f02-4841-9b8a-5f247b3c42f1\" (UID: \"7264b0b4-0f02-4841-9b8a-5f247b3c42f1\") " Nov 25 11:08:11 crc kubenswrapper[4769]: I1125 11:08:11.731237 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 11:08:11 crc kubenswrapper[4769]: I1125 11:08:11.731270 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7264b0b4-0f02-4841-9b8a-5f247b3c42f1-var-lock" (OuterVolumeSpecName: "var-lock") pod "7264b0b4-0f02-4841-9b8a-5f247b3c42f1" (UID: "7264b0b4-0f02-4841-9b8a-5f247b3c42f1"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 11:08:11 crc kubenswrapper[4769]: I1125 11:08:11.731356 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 11:08:11 crc kubenswrapper[4769]: I1125 11:08:11.731254 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 25 11:08:11 crc kubenswrapper[4769]: I1125 11:08:11.731727 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 25 11:08:11 crc kubenswrapper[4769]: I1125 11:08:11.731902 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 11:08:11 crc kubenswrapper[4769]: I1125 11:08:11.732655 4769 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 25 11:08:11 crc kubenswrapper[4769]: I1125 11:08:11.732674 4769 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 25 11:08:11 crc kubenswrapper[4769]: I1125 11:08:11.732683 4769 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7264b0b4-0f02-4841-9b8a-5f247b3c42f1-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 25 11:08:11 crc kubenswrapper[4769]: I1125 11:08:11.732691 4769 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Nov 25 11:08:11 crc kubenswrapper[4769]: I1125 11:08:11.732698 4769 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/7264b0b4-0f02-4841-9b8a-5f247b3c42f1-var-lock\") on node \"crc\" DevicePath \"\"" Nov 25 11:08:11 crc kubenswrapper[4769]: I1125 11:08:11.752774 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7264b0b4-0f02-4841-9b8a-5f247b3c42f1-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "7264b0b4-0f02-4841-9b8a-5f247b3c42f1" (UID: "7264b0b4-0f02-4841-9b8a-5f247b3c42f1"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:08:11 crc kubenswrapper[4769]: I1125 11:08:11.834268 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7264b0b4-0f02-4841-9b8a-5f247b3c42f1-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 11:08:11 crc kubenswrapper[4769]: E1125 11:08:11.907560 4769 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.201:6443: connect: connection refused" interval="3.2s" Nov 25 11:08:11 crc kubenswrapper[4769]: I1125 11:08:11.931082 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"7264b0b4-0f02-4841-9b8a-5f247b3c42f1","Type":"ContainerDied","Data":"0f087157b1cb41aed36c03ef2d9225161d64f8bb02b5846eb1e7a3262c2e2374"} Nov 25 11:08:11 crc kubenswrapper[4769]: I1125 11:08:11.931127 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0f087157b1cb41aed36c03ef2d9225161d64f8bb02b5846eb1e7a3262c2e2374" Nov 25 11:08:11 crc kubenswrapper[4769]: I1125 11:08:11.931101 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 25 11:08:11 crc kubenswrapper[4769]: I1125 11:08:11.936464 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 25 11:08:11 crc kubenswrapper[4769]: I1125 11:08:11.937410 4769 scope.go:117] "RemoveContainer" containerID="a89ab5e069d179b9fdb84112109d363b53dfecfc3b2af157c63d5636ce13b469" Nov 25 11:08:11 crc kubenswrapper[4769]: I1125 11:08:11.937500 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 11:08:11 crc kubenswrapper[4769]: I1125 11:08:11.957383 4769 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.201:6443: connect: connection refused" Nov 25 11:08:11 crc kubenswrapper[4769]: I1125 11:08:11.957891 4769 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.201:6443: connect: connection refused" Nov 25 11:08:11 crc kubenswrapper[4769]: I1125 11:08:11.958248 4769 status_manager.go:851] "Failed to get status for pod" podUID="7264b0b4-0f02-4841-9b8a-5f247b3c42f1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.201:6443: connect: connection refused" Nov 25 11:08:11 crc kubenswrapper[4769]: I1125 11:08:11.965792 4769 status_manager.go:851] "Failed to get status for pod" podUID="7264b0b4-0f02-4841-9b8a-5f247b3c42f1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.201:6443: connect: connection refused" Nov 25 11:08:11 crc kubenswrapper[4769]: I1125 11:08:11.966396 4769 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.201:6443: connect: connection refused" Nov 25 11:08:11 crc kubenswrapper[4769]: I1125 11:08:11.967299 4769 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.201:6443: connect: connection refused" Nov 25 11:08:11 crc kubenswrapper[4769]: I1125 11:08:11.981262 4769 scope.go:117] "RemoveContainer" containerID="b7917ef57720cd277ff739a4833b02238d3377f0662b48197cfe0b915cfc3291" Nov 25 11:08:12 crc kubenswrapper[4769]: I1125 11:08:12.010435 4769 scope.go:117] "RemoveContainer" containerID="bc6e016d87bfa4c3cfabadd30b8a355fd310ac4ee2c3e74214f453a10a6ddc24" Nov 25 11:08:12 crc kubenswrapper[4769]: I1125 11:08:12.036582 4769 scope.go:117] "RemoveContainer" 
containerID="cfef130b74225cba6765c8f9a0b14cc38ad34fade2c4492a2855c2383983718a" Nov 25 11:08:12 crc kubenswrapper[4769]: I1125 11:08:12.082682 4769 scope.go:117] "RemoveContainer" containerID="8242808ec6804778ee15da1f0aec5cb23be4d5734a06495cf99f0af3804afc96" Nov 25 11:08:12 crc kubenswrapper[4769]: I1125 11:08:12.109410 4769 scope.go:117] "RemoveContainer" containerID="2bb4fba9767ca973dfb9ec6e5da7dec7939256d5aa768f5dd2f4473b247a47e9" Nov 25 11:08:12 crc kubenswrapper[4769]: I1125 11:08:12.247950 4769 status_manager.go:851] "Failed to get status for pod" podUID="7264b0b4-0f02-4841-9b8a-5f247b3c42f1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.201:6443: connect: connection refused" Nov 25 11:08:12 crc kubenswrapper[4769]: I1125 11:08:12.248295 4769 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.201:6443: connect: connection refused" Nov 25 11:08:12 crc kubenswrapper[4769]: I1125 11:08:12.248543 4769 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.201:6443: connect: connection refused" Nov 25 11:08:12 crc kubenswrapper[4769]: I1125 11:08:12.258438 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Nov 25 11:08:13 crc kubenswrapper[4769]: I1125 11:08:13.176327 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-59lwg" Nov 25 11:08:13 crc kubenswrapper[4769]: I1125 11:08:13.177546 4769 status_manager.go:851] "Failed to get status for pod" podUID="65d1fe44-2bb2-4d58-8bcf-7e15ab70164c" pod="openshift-marketplace/community-operators-59lwg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-59lwg\": dial tcp 38.102.83.201:6443: connect: connection refused" Nov 25 11:08:13 crc kubenswrapper[4769]: I1125 11:08:13.177781 4769 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.201:6443: connect: connection refused" Nov 25 11:08:13 crc kubenswrapper[4769]: I1125 11:08:13.177988 4769 status_manager.go:851] "Failed to get status for pod" podUID="7264b0b4-0f02-4841-9b8a-5f247b3c42f1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.201:6443: connect: connection refused" Nov 25 11:08:13 crc kubenswrapper[4769]: I1125 11:08:13.226805 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-59lwg" Nov 25 11:08:13 crc kubenswrapper[4769]: I1125 11:08:13.227412 4769 status_manager.go:851] "Failed to get 
status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.201:6443: connect: connection refused" Nov 25 11:08:13 crc kubenswrapper[4769]: I1125 11:08:13.227795 4769 status_manager.go:851] "Failed to get status for pod" podUID="7264b0b4-0f02-4841-9b8a-5f247b3c42f1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.201:6443: connect: connection refused" Nov 25 11:08:13 crc kubenswrapper[4769]: I1125 11:08:13.228093 4769 status_manager.go:851] "Failed to get status for pod" podUID="65d1fe44-2bb2-4d58-8bcf-7e15ab70164c" pod="openshift-marketplace/community-operators-59lwg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-59lwg\": dial tcp 38.102.83.201:6443: connect: connection refused" Nov 25 11:08:13 crc kubenswrapper[4769]: E1125 11:08:13.793206 4769 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.201:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187b3b4eb2d0ba88 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 11:08:08.239192712 +0000 UTC m=+5036.824165025,LastTimestamp:2025-11-25 11:08:08.239192712 +0000 UTC m=+5036.824165025,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 25 11:08:14 crc kubenswrapper[4769]: E1125 11:08:14.291459 4769 desired_state_of_world_populator.go:312] "Error processing volume" err="error processing PVC openshift-logging/storage-logging-loki-index-gateway-0: failed to fetch PVC from API server: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-logging/persistentvolumeclaims/storage-logging-loki-index-gateway-0\": dial tcp 38.102.83.201:6443: connect: connection refused" pod="openshift-logging/logging-loki-index-gateway-0" volumeName="storage" Nov 25 11:08:15 crc kubenswrapper[4769]: E1125 11:08:15.109004 4769 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.201:6443: connect: connection refused" interval="6.4s" Nov 25 11:08:15 crc kubenswrapper[4769]: I1125 11:08:15.699622 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/kube-state-metrics-0" podUID="f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9" containerName="kube-state-metrics" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 25 11:08:18 crc kubenswrapper[4769]: I1125 11:08:18.236031 4769 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 11:08:18 crc kubenswrapper[4769]: I1125 11:08:18.238089 4769 status_manager.go:851] "Failed to get status for pod" podUID="65d1fe44-2bb2-4d58-8bcf-7e15ab70164c" pod="openshift-marketplace/community-operators-59lwg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-59lwg\": dial tcp 38.102.83.201:6443: connect: connection refused" Nov 25 11:08:18 crc kubenswrapper[4769]: I1125 11:08:18.238475 4769 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.201:6443: connect: connection refused" Nov 25 11:08:18 crc kubenswrapper[4769]: I1125 11:08:18.239143 4769 status_manager.go:851] "Failed to get status for pod" podUID="7264b0b4-0f02-4841-9b8a-5f247b3c42f1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.201:6443: connect: connection refused" Nov 25 11:08:18 crc kubenswrapper[4769]: I1125 11:08:18.268471 4769 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="8bbf363f-faf3-45da-a19f-54cc0119825c" Nov 25 11:08:18 crc kubenswrapper[4769]: I1125 11:08:18.268936 4769 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="8bbf363f-faf3-45da-a19f-54cc0119825c" Nov 25 11:08:18 crc kubenswrapper[4769]: E1125 11:08:18.269411 4769 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.201:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 11:08:18 crc kubenswrapper[4769]: I1125 11:08:18.270341 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 11:08:19 crc kubenswrapper[4769]: I1125 11:08:19.033870 4769 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="94000c281403abab8e953ad47c3e1bcb6e7e0e6b2114cfe89a00234032852f92" exitCode=0 Nov 25 11:08:19 crc kubenswrapper[4769]: I1125 11:08:19.034409 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"94000c281403abab8e953ad47c3e1bcb6e7e0e6b2114cfe89a00234032852f92"} Nov 25 11:08:19 crc kubenswrapper[4769]: I1125 11:08:19.034437 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"ac3823f166b020b89869383eb3a933fa7320d9217261e02e9a95d8a883a4492c"} Nov 25 11:08:19 crc kubenswrapper[4769]: I1125 11:08:19.034651 4769 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="8bbf363f-faf3-45da-a19f-54cc0119825c" Nov 25 11:08:19 crc kubenswrapper[4769]: I1125 11:08:19.034667 4769 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="8bbf363f-faf3-45da-a19f-54cc0119825c" Nov 25 11:08:19 crc kubenswrapper[4769]: E1125 11:08:19.035013 4769 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.201:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 11:08:19 crc kubenswrapper[4769]: I1125 11:08:19.035432 4769 status_manager.go:851] "Failed to get status for pod" podUID="7264b0b4-0f02-4841-9b8a-5f247b3c42f1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.201:6443: connect: connection refused" Nov 25 11:08:19 crc kubenswrapper[4769]: I1125 11:08:19.035771 4769 status_manager.go:851] "Failed to get status for pod" podUID="65d1fe44-2bb2-4d58-8bcf-7e15ab70164c" pod="openshift-marketplace/community-operators-59lwg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-59lwg\": dial tcp 38.102.83.201:6443: connect: connection refused" Nov 25 11:08:19 crc kubenswrapper[4769]: I1125 11:08:19.036101 4769 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.201:6443: connect: connection refused" Nov 25 11:08:19 crc kubenswrapper[4769]: I1125 11:08:19.037187 4769 generic.go:334] "Generic (PLEG): container finished" podID="9441dbc7-716c-413e-b0ea-bf1ef05b1608" containerID="4ab3af93f3df09f3bd9984b319746b513e6c0b970fbaaafbf6a6c79973534d0b" exitCode=1 Nov 25 11:08:19 crc kubenswrapper[4769]: I1125 11:08:19.037220 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-64569bb78d-pzqdq" event={"ID":"9441dbc7-716c-413e-b0ea-bf1ef05b1608","Type":"ContainerDied","Data":"4ab3af93f3df09f3bd9984b319746b513e6c0b970fbaaafbf6a6c79973534d0b"} Nov 25 11:08:19 crc kubenswrapper[4769]: 
I1125 11:08:19.038028 4769 status_manager.go:851] "Failed to get status for pod" podUID="7264b0b4-0f02-4841-9b8a-5f247b3c42f1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.201:6443: connect: connection refused" Nov 25 11:08:19 crc kubenswrapper[4769]: I1125 11:08:19.038071 4769 scope.go:117] "RemoveContainer" containerID="4ab3af93f3df09f3bd9984b319746b513e6c0b970fbaaafbf6a6c79973534d0b" Nov 25 11:08:19 crc kubenswrapper[4769]: I1125 11:08:19.038287 4769 status_manager.go:851] "Failed to get status for pod" podUID="9441dbc7-716c-413e-b0ea-bf1ef05b1608" pod="metallb-system/metallb-operator-controller-manager-64569bb78d-pzqdq" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-64569bb78d-pzqdq\": dial tcp 38.102.83.201:6443: connect: connection refused" Nov 25 11:08:19 crc kubenswrapper[4769]: I1125 11:08:19.038623 4769 status_manager.go:851] "Failed to get status for pod" podUID="65d1fe44-2bb2-4d58-8bcf-7e15ab70164c" pod="openshift-marketplace/community-operators-59lwg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-59lwg\": dial tcp 38.102.83.201:6443: connect: connection refused" Nov 25 11:08:19 crc kubenswrapper[4769]: I1125 11:08:19.038996 4769 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.201:6443: connect: connection refused" Nov 25 11:08:19 crc kubenswrapper[4769]: E1125 11:08:19.315996 4769 desired_state_of_world_populator.go:312] "Error processing volume" err="error processing PVC openstack/mysql-db-openstack-cell1-galera-0: failed to fetch PVC from API server: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/persistentvolumeclaims/mysql-db-openstack-cell1-galera-0\": dial tcp 38.102.83.201:6443: connect: connection refused" pod="openstack/openstack-cell1-galera-0" volumeName="mysql-db" Nov 25 11:08:20 crc kubenswrapper[4769]: I1125 11:08:20.050598 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"77c4914f163e4e552f5a71c1840ca4d3038cd4608710a3f39402a70deaddda93"} Nov 25 11:08:20 crc kubenswrapper[4769]: I1125 11:08:20.050846 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"302a2ba332c3278be91b2f0aa3ad653a52b62878b14b3a389a8aaa05b32a63c8"} Nov 25 11:08:20 crc kubenswrapper[4769]: I1125 11:08:20.053752 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-64569bb78d-pzqdq" event={"ID":"9441dbc7-716c-413e-b0ea-bf1ef05b1608","Type":"ContainerStarted","Data":"4e20ea5973bec117853583cca2f6ed9ac917d292b5b14f4ca82a8a2adaf6b30b"} Nov 25 11:08:20 crc kubenswrapper[4769]: I1125 11:08:20.054026 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-64569bb78d-pzqdq" Nov 25 11:08:20 crc kubenswrapper[4769]: I1125 11:08:20.054889 4769 status_manager.go:851] "Failed to get 
status for pod" podUID="65d1fe44-2bb2-4d58-8bcf-7e15ab70164c" pod="openshift-marketplace/community-operators-59lwg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-59lwg\": dial tcp 38.102.83.201:6443: connect: connection refused" Nov 25 11:08:20 crc kubenswrapper[4769]: I1125 11:08:20.055339 4769 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.201:6443: connect: connection refused" Nov 25 11:08:20 crc kubenswrapper[4769]: I1125 11:08:20.055643 4769 status_manager.go:851] "Failed to get status for pod" podUID="7264b0b4-0f02-4841-9b8a-5f247b3c42f1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.201:6443: connect: connection refused" Nov 25 11:08:20 crc kubenswrapper[4769]: I1125 11:08:20.055876 4769 status_manager.go:851] "Failed to get status for pod" podUID="9441dbc7-716c-413e-b0ea-bf1ef05b1608" pod="metallb-system/metallb-operator-controller-manager-64569bb78d-pzqdq" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-64569bb78d-pzqdq\": dial tcp 38.102.83.201:6443: connect: connection refused" Nov 25 11:08:20 crc kubenswrapper[4769]: I1125 11:08:20.056545 4769 generic.go:334] "Generic (PLEG): container finished" podID="930d9174-b8de-465e-a1b7-b9aa7c498246" containerID="20c95cf0fe1932f6b7e146e8e94f3355cf90208fb5c66c0ee3a7651b76a7628d" exitCode=1 Nov 25 11:08:20 crc kubenswrapper[4769]: I1125 11:08:20.056578 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-cbd48d4d7-8psr6" event={"ID":"930d9174-b8de-465e-a1b7-b9aa7c498246","Type":"ContainerDied","Data":"20c95cf0fe1932f6b7e146e8e94f3355cf90208fb5c66c0ee3a7651b76a7628d"} Nov 25 11:08:20 crc kubenswrapper[4769]: I1125 11:08:20.057339 4769 status_manager.go:851] "Failed to get status for pod" podUID="7264b0b4-0f02-4841-9b8a-5f247b3c42f1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.201:6443: connect: connection refused" Nov 25 11:08:20 crc kubenswrapper[4769]: I1125 11:08:20.057379 4769 scope.go:117] "RemoveContainer" containerID="20c95cf0fe1932f6b7e146e8e94f3355cf90208fb5c66c0ee3a7651b76a7628d" Nov 25 11:08:20 crc kubenswrapper[4769]: I1125 11:08:20.057785 4769 status_manager.go:851] "Failed to get status for pod" podUID="9441dbc7-716c-413e-b0ea-bf1ef05b1608" pod="metallb-system/metallb-operator-controller-manager-64569bb78d-pzqdq" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-64569bb78d-pzqdq\": dial tcp 38.102.83.201:6443: connect: connection refused" Nov 25 11:08:20 crc kubenswrapper[4769]: I1125 11:08:20.058228 4769 status_manager.go:851] "Failed to get status for pod" podUID="65d1fe44-2bb2-4d58-8bcf-7e15ab70164c" pod="openshift-marketplace/community-operators-59lwg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-59lwg\": dial tcp 38.102.83.201:6443: connect: connection refused" Nov 25 11:08:20 
crc kubenswrapper[4769]: I1125 11:08:20.058509 4769 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.201:6443: connect: connection refused" Nov 25 11:08:20 crc kubenswrapper[4769]: I1125 11:08:20.058731 4769 status_manager.go:851] "Failed to get status for pod" podUID="930d9174-b8de-465e-a1b7-b9aa7c498246" pod="openshift-operators-redhat/loki-operator-controller-manager-cbd48d4d7-8psr6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-operators-redhat/pods/loki-operator-controller-manager-cbd48d4d7-8psr6\": dial tcp 38.102.83.201:6443: connect: connection refused" Nov 25 11:08:21 crc kubenswrapper[4769]: I1125 11:08:21.073453 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"2ece089e02db05ba63b88232ae34f6b9052d6ebf701b922a991e6deb628251c0"} Nov 25 11:08:21 crc kubenswrapper[4769]: I1125 11:08:21.074059 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 11:08:21 crc kubenswrapper[4769]: I1125 11:08:21.074101 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"eb1ec27cf208815a555da6d7cbd2dfd02f1ec83e8dd807ef2d7458606ce07c40"} Nov 25 11:08:21 crc kubenswrapper[4769]: I1125 11:08:21.073690 4769 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="8bbf363f-faf3-45da-a19f-54cc0119825c" Nov 25 11:08:21 crc kubenswrapper[4769]: I1125 11:08:21.074146 4769 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="8bbf363f-faf3-45da-a19f-54cc0119825c" Nov 25 11:08:21 crc kubenswrapper[4769]: I1125 11:08:21.074126 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"d07d51a8f9208aa172c7e1d934c7f8a681d2b8669db24262d7ce4347896e7404"} Nov 25 11:08:21 crc kubenswrapper[4769]: I1125 11:08:21.076595 4769 generic.go:334] "Generic (PLEG): container finished" podID="9441dbc7-716c-413e-b0ea-bf1ef05b1608" containerID="4e20ea5973bec117853583cca2f6ed9ac917d292b5b14f4ca82a8a2adaf6b30b" exitCode=1 Nov 25 11:08:21 crc kubenswrapper[4769]: I1125 11:08:21.076652 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-64569bb78d-pzqdq" event={"ID":"9441dbc7-716c-413e-b0ea-bf1ef05b1608","Type":"ContainerDied","Data":"4e20ea5973bec117853583cca2f6ed9ac917d292b5b14f4ca82a8a2adaf6b30b"} Nov 25 11:08:21 crc kubenswrapper[4769]: I1125 11:08:21.076710 4769 scope.go:117] "RemoveContainer" containerID="4ab3af93f3df09f3bd9984b319746b513e6c0b970fbaaafbf6a6c79973534d0b" Nov 25 11:08:21 crc kubenswrapper[4769]: I1125 11:08:21.079019 4769 scope.go:117] "RemoveContainer" containerID="4e20ea5973bec117853583cca2f6ed9ac917d292b5b14f4ca82a8a2adaf6b30b" Nov 25 11:08:21 crc kubenswrapper[4769]: E1125 11:08:21.079846 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: 
\"back-off 10s restarting failed container=manager pod=metallb-operator-controller-manager-64569bb78d-pzqdq_metallb-system(9441dbc7-716c-413e-b0ea-bf1ef05b1608)\"" pod="metallb-system/metallb-operator-controller-manager-64569bb78d-pzqdq" podUID="9441dbc7-716c-413e-b0ea-bf1ef05b1608" Nov 25 11:08:21 crc kubenswrapper[4769]: I1125 11:08:21.080250 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-cbd48d4d7-8psr6" event={"ID":"930d9174-b8de-465e-a1b7-b9aa7c498246","Type":"ContainerStarted","Data":"95685d7fce95521ab9c980a4b524439bf2e649a86c4f8349f445b89eece829a9"} Nov 25 11:08:21 crc kubenswrapper[4769]: I1125 11:08:21.080655 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators-redhat/loki-operator-controller-manager-cbd48d4d7-8psr6" Nov 25 11:08:21 crc kubenswrapper[4769]: I1125 11:08:21.095464 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 25 11:08:21 crc kubenswrapper[4769]: I1125 11:08:21.095757 4769 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde" exitCode=1 Nov 25 11:08:21 crc kubenswrapper[4769]: I1125 11:08:21.095849 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde"} Nov 25 11:08:21 crc kubenswrapper[4769]: I1125 11:08:21.096682 4769 scope.go:117] "RemoveContainer" containerID="276d782fdb40c9e53c3a78cd399c73836f24fa8afc255526d23b51fe3c983cde" Nov 25 11:08:22 crc kubenswrapper[4769]: I1125 11:08:22.109426 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 25 11:08:22 crc kubenswrapper[4769]: I1125 11:08:22.110085 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"30f9e3aeaf2ece0399411c53376dfaeb18bfcef9b5f55b5971cccd47178384f1"} Nov 25 11:08:22 crc kubenswrapper[4769]: I1125 11:08:22.122000 4769 scope.go:117] "RemoveContainer" containerID="4e20ea5973bec117853583cca2f6ed9ac917d292b5b14f4ca82a8a2adaf6b30b" Nov 25 11:08:22 crc kubenswrapper[4769]: E1125 11:08:22.122264 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=metallb-operator-controller-manager-64569bb78d-pzqdq_metallb-system(9441dbc7-716c-413e-b0ea-bf1ef05b1608)\"" pod="metallb-system/metallb-operator-controller-manager-64569bb78d-pzqdq" podUID="9441dbc7-716c-413e-b0ea-bf1ef05b1608" Nov 25 11:08:22 crc kubenswrapper[4769]: I1125 11:08:22.290927 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 11:08:22 crc kubenswrapper[4769]: I1125 11:08:22.291003 4769 prober.go:107] "Probe 
failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 11:08:23 crc kubenswrapper[4769]: I1125 11:08:23.270560 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 11:08:23 crc kubenswrapper[4769]: I1125 11:08:23.270923 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 11:08:23 crc kubenswrapper[4769]: I1125 11:08:23.280052 4769 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Nov 25 11:08:23 crc kubenswrapper[4769]: [+]log ok Nov 25 11:08:23 crc kubenswrapper[4769]: [+]etcd ok Nov 25 11:08:23 crc kubenswrapper[4769]: [+]poststarthook/openshift.io-oauth-apiserver-reachable ok Nov 25 11:08:23 crc kubenswrapper[4769]: [+]poststarthook/start-apiserver-admission-initializer ok Nov 25 11:08:23 crc kubenswrapper[4769]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Nov 25 11:08:23 crc kubenswrapper[4769]: [+]poststarthook/openshift.io-api-request-count-filter ok Nov 25 11:08:23 crc kubenswrapper[4769]: [+]poststarthook/openshift.io-startkubeinformers ok Nov 25 11:08:23 crc kubenswrapper[4769]: [+]poststarthook/openshift.io-openshift-apiserver-reachable ok Nov 25 11:08:23 crc kubenswrapper[4769]: [+]poststarthook/generic-apiserver-start-informers ok Nov 25 11:08:23 crc kubenswrapper[4769]: [+]poststarthook/priority-and-fairness-config-consumer ok Nov 25 11:08:23 crc kubenswrapper[4769]: [+]poststarthook/priority-and-fairness-filter ok Nov 25 11:08:23 crc kubenswrapper[4769]: [+]poststarthook/storage-object-count-tracker-hook ok Nov 25 11:08:23 crc kubenswrapper[4769]: [+]poststarthook/start-apiextensions-informers ok Nov 25 11:08:23 crc kubenswrapper[4769]: [+]poststarthook/start-apiextensions-controllers ok Nov 25 11:08:23 crc kubenswrapper[4769]: [+]poststarthook/crd-informer-synced ok Nov 25 11:08:23 crc kubenswrapper[4769]: [+]poststarthook/start-system-namespaces-controller ok Nov 25 11:08:23 crc kubenswrapper[4769]: [+]poststarthook/start-cluster-authentication-info-controller ok Nov 25 11:08:23 crc kubenswrapper[4769]: [+]poststarthook/start-kube-apiserver-identity-lease-controller ok Nov 25 11:08:23 crc kubenswrapper[4769]: [+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok Nov 25 11:08:23 crc kubenswrapper[4769]: [+]poststarthook/start-legacy-token-tracking-controller ok Nov 25 11:08:23 crc kubenswrapper[4769]: [+]poststarthook/start-service-ip-repair-controllers ok Nov 25 11:08:23 crc kubenswrapper[4769]: [-]poststarthook/rbac/bootstrap-roles failed: reason withheld Nov 25 11:08:23 crc kubenswrapper[4769]: [-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld Nov 25 11:08:23 crc kubenswrapper[4769]: [+]poststarthook/priority-and-fairness-config-producer ok Nov 25 11:08:23 crc kubenswrapper[4769]: [+]poststarthook/bootstrap-controller ok Nov 25 11:08:23 crc kubenswrapper[4769]: [+]poststarthook/aggregator-reload-proxy-client-cert ok Nov 25 11:08:23 crc kubenswrapper[4769]: [+]poststarthook/start-kube-aggregator-informers ok Nov 25 11:08:23 crc 
kubenswrapper[4769]: [+]poststarthook/apiservice-status-local-available-controller ok Nov 25 11:08:23 crc kubenswrapper[4769]: [+]poststarthook/apiservice-status-remote-available-controller ok Nov 25 11:08:23 crc kubenswrapper[4769]: [+]poststarthook/apiservice-registration-controller ok Nov 25 11:08:23 crc kubenswrapper[4769]: [+]poststarthook/apiservice-wait-for-first-sync ok Nov 25 11:08:23 crc kubenswrapper[4769]: [+]poststarthook/apiservice-discovery-controller ok Nov 25 11:08:23 crc kubenswrapper[4769]: [+]poststarthook/kube-apiserver-autoregistration ok Nov 25 11:08:23 crc kubenswrapper[4769]: [+]autoregister-completion ok Nov 25 11:08:23 crc kubenswrapper[4769]: [+]poststarthook/apiservice-openapi-controller ok Nov 25 11:08:23 crc kubenswrapper[4769]: [+]poststarthook/apiservice-openapiv3-controller ok Nov 25 11:08:23 crc kubenswrapper[4769]: livez check failed Nov 25 11:08:23 crc kubenswrapper[4769]: I1125 11:08:23.280111 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 11:08:25 crc kubenswrapper[4769]: I1125 11:08:25.672106 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/kube-state-metrics-0" podUID="f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9" containerName="kube-state-metrics" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 25 11:08:26 crc kubenswrapper[4769]: I1125 11:08:26.846083 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 11:08:27 crc kubenswrapper[4769]: I1125 11:08:27.530986 4769 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 11:08:27 crc kubenswrapper[4769]: I1125 11:08:27.687025 4769 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="6a169195-02f2-4e4c-8328-0b6cfa4d8a97" Nov 25 11:08:28 crc kubenswrapper[4769]: I1125 11:08:28.186607 4769 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="8bbf363f-faf3-45da-a19f-54cc0119825c" Nov 25 11:08:28 crc kubenswrapper[4769]: I1125 11:08:28.186637 4769 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="8bbf363f-faf3-45da-a19f-54cc0119825c" Nov 25 11:08:28 crc kubenswrapper[4769]: I1125 11:08:28.190366 4769 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="6a169195-02f2-4e4c-8328-0b6cfa4d8a97" Nov 25 11:08:29 crc kubenswrapper[4769]: I1125 11:08:29.204532 4769 generic.go:334] "Generic (PLEG): container finished" podID="18e1910e-52b2-439b-a93f-4ffe63a7b992" containerID="d8cd54e9de5c533f7a6eaf9a573521727cbf6c52978bbd41e5fe7af0bf47f07c" exitCode=1 Nov 25 11:08:29 crc kubenswrapper[4769]: I1125 11:08:29.204612 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wssdn" event={"ID":"18e1910e-52b2-439b-a93f-4ffe63a7b992","Type":"ContainerDied","Data":"d8cd54e9de5c533f7a6eaf9a573521727cbf6c52978bbd41e5fe7af0bf47f07c"} Nov 25 11:08:29 crc kubenswrapper[4769]: I1125 11:08:29.205669 4769 
scope.go:117] "RemoveContainer" containerID="d8cd54e9de5c533f7a6eaf9a573521727cbf6c52978bbd41e5fe7af0bf47f07c" Nov 25 11:08:29 crc kubenswrapper[4769]: I1125 11:08:29.209434 4769 generic.go:334] "Generic (PLEG): container finished" podID="6d9be953-34ea-4956-96bf-84d5f8babb2d" containerID="0ae0674b08faa57f993ad26e7715a336d1995e72605f6c44f88026dd6dbc0a02" exitCode=1 Nov 25 11:08:29 crc kubenswrapper[4769]: I1125 11:08:29.209496 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-rgn2z" event={"ID":"6d9be953-34ea-4956-96bf-84d5f8babb2d","Type":"ContainerDied","Data":"0ae0674b08faa57f993ad26e7715a336d1995e72605f6c44f88026dd6dbc0a02"} Nov 25 11:08:29 crc kubenswrapper[4769]: I1125 11:08:29.210479 4769 scope.go:117] "RemoveContainer" containerID="0ae0674b08faa57f993ad26e7715a336d1995e72605f6c44f88026dd6dbc0a02" Nov 25 11:08:29 crc kubenswrapper[4769]: I1125 11:08:29.216311 4769 generic.go:334] "Generic (PLEG): container finished" podID="a032414e-4be2-47f7-ac88-3bdec0ccb151" containerID="39f483ab6967bf5b12b80bbafec1e9b21fe98906c3ce2a5f1956a4b7f80f92de" exitCode=1 Nov 25 11:08:29 crc kubenswrapper[4769]: I1125 11:08:29.216357 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-bzrbr" event={"ID":"a032414e-4be2-47f7-ac88-3bdec0ccb151","Type":"ContainerDied","Data":"39f483ab6967bf5b12b80bbafec1e9b21fe98906c3ce2a5f1956a4b7f80f92de"} Nov 25 11:08:29 crc kubenswrapper[4769]: I1125 11:08:29.217101 4769 scope.go:117] "RemoveContainer" containerID="39f483ab6967bf5b12b80bbafec1e9b21fe98906c3ce2a5f1956a4b7f80f92de" Nov 25 11:08:29 crc kubenswrapper[4769]: I1125 11:08:29.788851 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 11:08:29 crc kubenswrapper[4769]: I1125 11:08:29.794532 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.262471 4769 generic.go:334] "Generic (PLEG): container finished" podID="aaa65e3e-75e2-4f50-b9d6-aa9710a6e394" containerID="7c3b83fb330e4e91f8d36af736ddc4b23b1e75845f4ea765e11ec77d28f9619c" exitCode=1 Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.264406 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-2hp9n" event={"ID":"aaa65e3e-75e2-4f50-b9d6-aa9710a6e394","Type":"ContainerDied","Data":"7c3b83fb330e4e91f8d36af736ddc4b23b1e75845f4ea765e11ec77d28f9619c"} Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.265983 4769 scope.go:117] "RemoveContainer" containerID="7c3b83fb330e4e91f8d36af736ddc4b23b1e75845f4ea765e11ec77d28f9619c" Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.277775 4769 generic.go:334] "Generic (PLEG): container finished" podID="67abea47-5e8a-43a2-8865-929cfdfc607c" containerID="b91c9f589a2efc6abf9221b2ac5a1520cdfe6e110dd005fda6c245342058b4e0" exitCode=1 Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.277915 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-774b86978c-w8ftq" event={"ID":"67abea47-5e8a-43a2-8865-929cfdfc607c","Type":"ContainerDied","Data":"b91c9f589a2efc6abf9221b2ac5a1520cdfe6e110dd005fda6c245342058b4e0"} Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.281693 4769 generic.go:334] "Generic 
(PLEG): container finished" podID="6d9be953-34ea-4956-96bf-84d5f8babb2d" containerID="93feba38754e7e7cf6fd7ce94bfbe24770aa18f243ce1c1b5ddd2f51902c5ec9" exitCode=1 Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.281807 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-rgn2z" event={"ID":"6d9be953-34ea-4956-96bf-84d5f8babb2d","Type":"ContainerDied","Data":"93feba38754e7e7cf6fd7ce94bfbe24770aa18f243ce1c1b5ddd2f51902c5ec9"} Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.281868 4769 scope.go:117] "RemoveContainer" containerID="0ae0674b08faa57f993ad26e7715a336d1995e72605f6c44f88026dd6dbc0a02" Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.282864 4769 scope.go:117] "RemoveContainer" containerID="93feba38754e7e7cf6fd7ce94bfbe24770aa18f243ce1c1b5ddd2f51902c5ec9" Nov 25 11:08:30 crc kubenswrapper[4769]: E1125 11:08:30.283793 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=neutron-operator-controller-manager-7c57c8bbc4-rgn2z_openstack-operators(6d9be953-34ea-4956-96bf-84d5f8babb2d)\"" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-rgn2z" podUID="6d9be953-34ea-4956-96bf-84d5f8babb2d" Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.285403 4769 scope.go:117] "RemoveContainer" containerID="b91c9f589a2efc6abf9221b2ac5a1520cdfe6e110dd005fda6c245342058b4e0" Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.290467 4769 generic.go:334] "Generic (PLEG): container finished" podID="169b77e8-e7b0-4d11-9915-2442f48d9347" containerID="c819ffdb79867617900c0ddd11447953f5677fd1b9d122c17ebdeb83cbd8965a" exitCode=1 Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.290565 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-pxvh7" event={"ID":"169b77e8-e7b0-4d11-9915-2442f48d9347","Type":"ContainerDied","Data":"c819ffdb79867617900c0ddd11447953f5677fd1b9d122c17ebdeb83cbd8965a"} Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.292890 4769 scope.go:117] "RemoveContainer" containerID="c819ffdb79867617900c0ddd11447953f5677fd1b9d122c17ebdeb83cbd8965a" Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.311131 4769 generic.go:334] "Generic (PLEG): container finished" podID="1da948d8-e834-488a-a3ec-a0c0229ebaf5" containerID="946ca39c4af07e3b66da42eb0fe6d94359578b66083e6fe55433cc33fabbf671" exitCode=1 Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.311236 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-m2qxd" event={"ID":"1da948d8-e834-488a-a3ec-a0c0229ebaf5","Type":"ContainerDied","Data":"946ca39c4af07e3b66da42eb0fe6d94359578b66083e6fe55433cc33fabbf671"} Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.312405 4769 scope.go:117] "RemoveContainer" containerID="946ca39c4af07e3b66da42eb0fe6d94359578b66083e6fe55433cc33fabbf671" Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.322456 4769 generic.go:334] "Generic (PLEG): container finished" podID="88cb8ad7-c855-45eb-a471-aacb8c42082c" containerID="396c9684073cbf9cfbac8196a83452dc17e63a68e71e196a5bd3cfd3d00a3ff8" exitCode=1 Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.322537 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-kbhzd" event={"ID":"88cb8ad7-c855-45eb-a471-aacb8c42082c","Type":"ContainerDied","Data":"396c9684073cbf9cfbac8196a83452dc17e63a68e71e196a5bd3cfd3d00a3ff8"} Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.332162 4769 scope.go:117] "RemoveContainer" containerID="396c9684073cbf9cfbac8196a83452dc17e63a68e71e196a5bd3cfd3d00a3ff8" Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.344870 4769 generic.go:334] "Generic (PLEG): container finished" podID="63e66921-47a7-407a-b50e-06cf5cadb8be" containerID="e2bc390b476e8b5842c57ead30722b9aea8807b2d2505542b913b2fb7ae56e6a" exitCode=1 Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.344979 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-qdjsd" event={"ID":"63e66921-47a7-407a-b50e-06cf5cadb8be","Type":"ContainerDied","Data":"e2bc390b476e8b5842c57ead30722b9aea8807b2d2505542b913b2fb7ae56e6a"} Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.346636 4769 scope.go:117] "RemoveContainer" containerID="e2bc390b476e8b5842c57ead30722b9aea8807b2d2505542b913b2fb7ae56e6a" Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.350369 4769 generic.go:334] "Generic (PLEG): container finished" podID="1f5735dc-67e5-423c-9a8f-d42977c892d3" containerID="e17e389fbb7c359261990f8efde39476099fe9c541d058445d5c07710d2cdbad" exitCode=1 Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.350426 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-hh9nc" event={"ID":"1f5735dc-67e5-423c-9a8f-d42977c892d3","Type":"ContainerDied","Data":"e17e389fbb7c359261990f8efde39476099fe9c541d058445d5c07710d2cdbad"} Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.351104 4769 scope.go:117] "RemoveContainer" containerID="e17e389fbb7c359261990f8efde39476099fe9c541d058445d5c07710d2cdbad" Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.357068 4769 generic.go:334] "Generic (PLEG): container finished" podID="18e1910e-52b2-439b-a93f-4ffe63a7b992" containerID="32e2745bbc5cdad2789665569e0dc2773da3459d31ff638abb7caa8d8eba887b" exitCode=1 Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.357153 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wssdn" event={"ID":"18e1910e-52b2-439b-a93f-4ffe63a7b992","Type":"ContainerDied","Data":"32e2745bbc5cdad2789665569e0dc2773da3459d31ff638abb7caa8d8eba887b"} Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.359163 4769 scope.go:117] "RemoveContainer" containerID="32e2745bbc5cdad2789665569e0dc2773da3459d31ff638abb7caa8d8eba887b" Nov 25 11:08:30 crc kubenswrapper[4769]: E1125 11:08:30.359516 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=swift-operator-controller-manager-6fdc4fcf86-wssdn_openstack-operators(18e1910e-52b2-439b-a93f-4ffe63a7b992)\"" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wssdn" podUID="18e1910e-52b2-439b-a93f-4ffe63a7b992" Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.365559 4769 generic.go:334] "Generic (PLEG): container finished" podID="b59cac8b-fb36-4316-ab83-da7202b67af5" containerID="1a9b8362a3d604343b360e638d70bb8430a8d47fbf9df6e616d5d01ebe8d8afd" exitCode=1 Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.365619 4769 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-47gs9" event={"ID":"b59cac8b-fb36-4316-ab83-da7202b67af5","Type":"ContainerDied","Data":"1a9b8362a3d604343b360e638d70bb8430a8d47fbf9df6e616d5d01ebe8d8afd"} Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.366383 4769 scope.go:117] "RemoveContainer" containerID="1a9b8362a3d604343b360e638d70bb8430a8d47fbf9df6e616d5d01ebe8d8afd" Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.380671 4769 generic.go:334] "Generic (PLEG): container finished" podID="b90cc789-8211-48bc-85cc-1a31ad1af486" containerID="ac289fa8350a70a6ad5e0d6a6e2d17b36cce60a1cca3f86da81a58a9be17bc34" exitCode=1 Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.380790 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-zv5xr" event={"ID":"b90cc789-8211-48bc-85cc-1a31ad1af486","Type":"ContainerDied","Data":"ac289fa8350a70a6ad5e0d6a6e2d17b36cce60a1cca3f86da81a58a9be17bc34"} Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.384716 4769 generic.go:334] "Generic (PLEG): container finished" podID="c21f4ff5-86fa-44f0-993f-59189de57182" containerID="9682b8b8fcb342c38db68ed7d793776921b15502a14e641349498956962b5ce4" exitCode=1 Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.384795 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cb74df96-b25q9" event={"ID":"c21f4ff5-86fa-44f0-993f-59189de57182","Type":"ContainerDied","Data":"9682b8b8fcb342c38db68ed7d793776921b15502a14e641349498956962b5ce4"} Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.387323 4769 scope.go:117] "RemoveContainer" containerID="9682b8b8fcb342c38db68ed7d793776921b15502a14e641349498956962b5ce4" Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.388431 4769 generic.go:334] "Generic (PLEG): container finished" podID="1b8cd25d-43dd-4774-b1d9-59572bb6bef7" containerID="ed06562448baa51835a8cf1f3e807c9075497978a76b10bd16176ef8d2f1f3b4" exitCode=1 Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.388506 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rcv4n" event={"ID":"1b8cd25d-43dd-4774-b1d9-59572bb6bef7","Type":"ContainerDied","Data":"ed06562448baa51835a8cf1f3e807c9075497978a76b10bd16176ef8d2f1f3b4"} Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.389885 4769 scope.go:117] "RemoveContainer" containerID="ed06562448baa51835a8cf1f3e807c9075497978a76b10bd16176ef8d2f1f3b4" Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.394278 4769 scope.go:117] "RemoveContainer" containerID="ac289fa8350a70a6ad5e0d6a6e2d17b36cce60a1cca3f86da81a58a9be17bc34" Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.397878 4769 generic.go:334] "Generic (PLEG): container finished" podID="a032414e-4be2-47f7-ac88-3bdec0ccb151" containerID="cd7b3e66133ef9e6cf910abf987e916bbca2ff088f170e7afdf537fa37db392e" exitCode=1 Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.397925 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-bzrbr" event={"ID":"a032414e-4be2-47f7-ac88-3bdec0ccb151","Type":"ContainerDied","Data":"cd7b3e66133ef9e6cf910abf987e916bbca2ff088f170e7afdf537fa37db392e"} Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.398848 4769 scope.go:117] "RemoveContainer" 
containerID="cd7b3e66133ef9e6cf910abf987e916bbca2ff088f170e7afdf537fa37db392e" Nov 25 11:08:30 crc kubenswrapper[4769]: E1125 11:08:30.399195 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=operator pod=rabbitmq-cluster-operator-manager-668c99d594-bzrbr_openstack-operators(a032414e-4be2-47f7-ac88-3bdec0ccb151)\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-bzrbr" podUID="a032414e-4be2-47f7-ac88-3bdec0ccb151" Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.406189 4769 generic.go:334] "Generic (PLEG): container finished" podID="1be22f03-8697-413b-922c-9344185c05c4" containerID="7517f359f23085b907717c524af1b29960d56f40646f99c57e3a8a7751cfdba4" exitCode=1 Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.407599 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-g8xqz" event={"ID":"1be22f03-8697-413b-922c-9344185c05c4","Type":"ContainerDied","Data":"7517f359f23085b907717c524af1b29960d56f40646f99c57e3a8a7751cfdba4"} Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.408559 4769 scope.go:117] "RemoveContainer" containerID="7517f359f23085b907717c524af1b29960d56f40646f99c57e3a8a7751cfdba4" Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.678233 4769 scope.go:117] "RemoveContainer" containerID="d8cd54e9de5c533f7a6eaf9a573521727cbf6c52978bbd41e5fe7af0bf47f07c" Nov 25 11:08:30 crc kubenswrapper[4769]: I1125 11:08:30.935121 4769 scope.go:117] "RemoveContainer" containerID="39f483ab6967bf5b12b80bbafec1e9b21fe98906c3ce2a5f1956a4b7f80f92de" Nov 25 11:08:31 crc kubenswrapper[4769]: I1125 11:08:31.422315 4769 generic.go:334] "Generic (PLEG): container finished" podID="a2f1ad69-27e4-4131-a742-a8d2c5df8636" containerID="00477b785738787aa94d9a777223971820e93dafb0bf8688e595b5cf3205784e" exitCode=1 Nov 25 11:08:31 crc kubenswrapper[4769]: I1125 11:08:31.422652 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-68bvf" event={"ID":"a2f1ad69-27e4-4131-a742-a8d2c5df8636","Type":"ContainerDied","Data":"00477b785738787aa94d9a777223971820e93dafb0bf8688e595b5cf3205784e"} Nov 25 11:08:31 crc kubenswrapper[4769]: I1125 11:08:31.423541 4769 scope.go:117] "RemoveContainer" containerID="00477b785738787aa94d9a777223971820e93dafb0bf8688e595b5cf3205784e" Nov 25 11:08:31 crc kubenswrapper[4769]: I1125 11:08:31.428823 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-kbhzd" event={"ID":"88cb8ad7-c855-45eb-a471-aacb8c42082c","Type":"ContainerStarted","Data":"a23fcc71d50f18f3fe01708b8edccb8721556587c427281e3b2e049cdbb319ad"} Nov 25 11:08:31 crc kubenswrapper[4769]: I1125 11:08:31.429231 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-kbhzd" Nov 25 11:08:31 crc kubenswrapper[4769]: I1125 11:08:31.431589 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-774b86978c-w8ftq" event={"ID":"67abea47-5e8a-43a2-8865-929cfdfc607c","Type":"ContainerStarted","Data":"6846c874242d9e6c04dcab0a1fe89dd686acb8a1f3b9e35b6e6e4a9c1b5f5618"} Nov 25 11:08:31 crc kubenswrapper[4769]: I1125 11:08:31.433312 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/heat-operator-controller-manager-774b86978c-w8ftq" Nov 25 11:08:31 crc kubenswrapper[4769]: I1125 11:08:31.434795 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rcv4n" event={"ID":"1b8cd25d-43dd-4774-b1d9-59572bb6bef7","Type":"ContainerStarted","Data":"4c5e7e950f6538b39e9f699e1a741178dc60284160600dbf7845ddbf614e82cd"} Nov 25 11:08:31 crc kubenswrapper[4769]: I1125 11:08:31.435457 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rcv4n" Nov 25 11:08:31 crc kubenswrapper[4769]: I1125 11:08:31.442000 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-qdjsd" event={"ID":"63e66921-47a7-407a-b50e-06cf5cadb8be","Type":"ContainerStarted","Data":"39d9302b3639f4ddffeaa35d2e8fc70b20f5c0cc9b12bbd32022210155aaad67"} Nov 25 11:08:31 crc kubenswrapper[4769]: I1125 11:08:31.443174 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-qdjsd" Nov 25 11:08:31 crc kubenswrapper[4769]: I1125 11:08:31.460813 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-m2qxd" event={"ID":"1da948d8-e834-488a-a3ec-a0c0229ebaf5","Type":"ContainerStarted","Data":"19aa616ade2c53210e39d6c8147c08b8a7b9cc9666a609ef0e2a2e1b58af8099"} Nov 25 11:08:31 crc kubenswrapper[4769]: I1125 11:08:31.461488 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-m2qxd" Nov 25 11:08:31 crc kubenswrapper[4769]: I1125 11:08:31.464159 4769 generic.go:334] "Generic (PLEG): container finished" podID="6520a852-60ef-47d1-800b-633eae1655dd" containerID="25b080e51b398419543c27866dcc0fd85787fc6c99edbd8530e8e717eea1ab53" exitCode=1 Nov 25 11:08:31 crc kubenswrapper[4769]: I1125 11:08:31.464269 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-7fb4d7987d-w6ztr" event={"ID":"6520a852-60ef-47d1-800b-633eae1655dd","Type":"ContainerDied","Data":"25b080e51b398419543c27866dcc0fd85787fc6c99edbd8530e8e717eea1ab53"} Nov 25 11:08:31 crc kubenswrapper[4769]: I1125 11:08:31.465201 4769 scope.go:117] "RemoveContainer" containerID="25b080e51b398419543c27866dcc0fd85787fc6c99edbd8530e8e717eea1ab53" Nov 25 11:08:31 crc kubenswrapper[4769]: I1125 11:08:31.468143 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-pxvh7" event={"ID":"169b77e8-e7b0-4d11-9915-2442f48d9347","Type":"ContainerStarted","Data":"ba2c66990cb91e68697628c0f3b547e8873ad166897c547a0344ad651d3e5fe5"} Nov 25 11:08:31 crc kubenswrapper[4769]: I1125 11:08:31.468849 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-pxvh7" Nov 25 11:08:31 crc kubenswrapper[4769]: I1125 11:08:31.474497 4769 generic.go:334] "Generic (PLEG): container finished" podID="136f8f90-5673-4a08-ab4b-c030c1c428a6" containerID="3a1d89eab4ebc4eb141327b74163cc1f4e865b034ffb2645df93f4aef500b974" exitCode=1 Nov 25 11:08:31 crc kubenswrapper[4769]: I1125 11:08:31.474565 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-nktn6" event={"ID":"136f8f90-5673-4a08-ab4b-c030c1c428a6","Type":"ContainerDied","Data":"3a1d89eab4ebc4eb141327b74163cc1f4e865b034ffb2645df93f4aef500b974"} Nov 25 11:08:31 crc kubenswrapper[4769]: I1125 11:08:31.475480 4769 scope.go:117] "RemoveContainer" containerID="3a1d89eab4ebc4eb141327b74163cc1f4e865b034ffb2645df93f4aef500b974" Nov 25 11:08:31 crc kubenswrapper[4769]: I1125 11:08:31.479823 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-hh9nc" event={"ID":"1f5735dc-67e5-423c-9a8f-d42977c892d3","Type":"ContainerStarted","Data":"80a8ac840c40863e509142f82abbd1eac0a87057401260cd1cf0950115419adf"} Nov 25 11:08:31 crc kubenswrapper[4769]: I1125 11:08:31.480637 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-hh9nc" Nov 25 11:08:31 crc kubenswrapper[4769]: I1125 11:08:31.488191 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-47gs9" event={"ID":"b59cac8b-fb36-4316-ab83-da7202b67af5","Type":"ContainerStarted","Data":"6d68c8b35baf981f2d94ddadea913524158f563f9d15ef8038e939ee85cb6bc5"} Nov 25 11:08:31 crc kubenswrapper[4769]: I1125 11:08:31.499587 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-2hp9n" event={"ID":"aaa65e3e-75e2-4f50-b9d6-aa9710a6e394","Type":"ContainerStarted","Data":"d9746733b1af68e57c0f0b4acdb234fe445aa5b0e603e575247719cfbbf4f531"} Nov 25 11:08:31 crc kubenswrapper[4769]: I1125 11:08:31.499873 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-2hp9n" Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.520325 4769 generic.go:334] "Generic (PLEG): container finished" podID="1be22f03-8697-413b-922c-9344185c05c4" containerID="935b97555d27421b14befc4592ae226765148116fac79ea0f3e09138aa321842" exitCode=1 Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.520405 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-g8xqz" event={"ID":"1be22f03-8697-413b-922c-9344185c05c4","Type":"ContainerDied","Data":"935b97555d27421b14befc4592ae226765148116fac79ea0f3e09138aa321842"} Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.520980 4769 scope.go:117] "RemoveContainer" containerID="7517f359f23085b907717c524af1b29960d56f40646f99c57e3a8a7751cfdba4" Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.521782 4769 scope.go:117] "RemoveContainer" containerID="935b97555d27421b14befc4592ae226765148116fac79ea0f3e09138aa321842" Nov 25 11:08:32 crc kubenswrapper[4769]: E1125 11:08:32.524014 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=placement-operator-controller-manager-5db546f9d9-g8xqz_openstack-operators(1be22f03-8697-413b-922c-9344185c05c4)\"" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-g8xqz" podUID="1be22f03-8697-413b-922c-9344185c05c4" Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.530207 4769 generic.go:334] "Generic (PLEG): container finished" podID="0f8a8be0-04e6-4ef8-b3ce-fb6ced595cbd" 
containerID="6c148f7c3df14f45a0d8f4337fe0cdd97c4a74585964db421c37af0e242000af" exitCode=1 Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.530271 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7xcjk" event={"ID":"0f8a8be0-04e6-4ef8-b3ce-fb6ced595cbd","Type":"ContainerDied","Data":"6c148f7c3df14f45a0d8f4337fe0cdd97c4a74585964db421c37af0e242000af"} Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.531104 4769 scope.go:117] "RemoveContainer" containerID="6c148f7c3df14f45a0d8f4337fe0cdd97c4a74585964db421c37af0e242000af" Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.534577 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-7fb4d7987d-w6ztr" event={"ID":"6520a852-60ef-47d1-800b-633eae1655dd","Type":"ContainerStarted","Data":"fc086567eb5af4b09d2014dc3d0ef65b4a1b609b972e1fe32924c155155ddab5"} Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.535275 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-7fb4d7987d-w6ztr" Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.538408 4769 generic.go:334] "Generic (PLEG): container finished" podID="4894deb0-65ca-4b42-b397-4092a75739c9" containerID="2a082f7c1ba5b377707c21505ffb93a704670d0c971db75262950820a1188ed5" exitCode=1 Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.538483 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-k2htr" event={"ID":"4894deb0-65ca-4b42-b397-4092a75739c9","Type":"ContainerDied","Data":"2a082f7c1ba5b377707c21505ffb93a704670d0c971db75262950820a1188ed5"} Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.539581 4769 scope.go:117] "RemoveContainer" containerID="2a082f7c1ba5b377707c21505ffb93a704670d0c971db75262950820a1188ed5" Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.543416 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-68bvf" event={"ID":"a2f1ad69-27e4-4131-a742-a8d2c5df8636","Type":"ContainerStarted","Data":"74e51c5514768f63cf30779a9a9ff75255ce78baf6763035fc39525e117305ba"} Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.543868 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-68bvf" Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.582132 4769 generic.go:334] "Generic (PLEG): container finished" podID="67abea47-5e8a-43a2-8865-929cfdfc607c" containerID="6846c874242d9e6c04dcab0a1fe89dd686acb8a1f3b9e35b6e6e4a9c1b5f5618" exitCode=1 Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.582440 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-774b86978c-w8ftq" event={"ID":"67abea47-5e8a-43a2-8865-929cfdfc607c","Type":"ContainerDied","Data":"6846c874242d9e6c04dcab0a1fe89dd686acb8a1f3b9e35b6e6e4a9c1b5f5618"} Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.583204 4769 scope.go:117] "RemoveContainer" containerID="6846c874242d9e6c04dcab0a1fe89dd686acb8a1f3b9e35b6e6e4a9c1b5f5618" Nov 25 11:08:32 crc kubenswrapper[4769]: E1125 11:08:32.583469 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager 
pod=heat-operator-controller-manager-774b86978c-w8ftq_openstack-operators(67abea47-5e8a-43a2-8865-929cfdfc607c)\"" pod="openstack-operators/heat-operator-controller-manager-774b86978c-w8ftq" podUID="67abea47-5e8a-43a2-8865-929cfdfc607c" Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.593797 4769 generic.go:334] "Generic (PLEG): container finished" podID="63e66921-47a7-407a-b50e-06cf5cadb8be" containerID="39d9302b3639f4ddffeaa35d2e8fc70b20f5c0cc9b12bbd32022210155aaad67" exitCode=1 Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.593876 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-qdjsd" event={"ID":"63e66921-47a7-407a-b50e-06cf5cadb8be","Type":"ContainerDied","Data":"39d9302b3639f4ddffeaa35d2e8fc70b20f5c0cc9b12bbd32022210155aaad67"} Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.594464 4769 scope.go:117] "RemoveContainer" containerID="39d9302b3639f4ddffeaa35d2e8fc70b20f5c0cc9b12bbd32022210155aaad67" Nov 25 11:08:32 crc kubenswrapper[4769]: E1125 11:08:32.595414 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=manila-operator-controller-manager-58bb8d67cc-qdjsd_openstack-operators(63e66921-47a7-407a-b50e-06cf5cadb8be)\"" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-qdjsd" podUID="63e66921-47a7-407a-b50e-06cf5cadb8be" Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.606082 4769 generic.go:334] "Generic (PLEG): container finished" podID="1f5735dc-67e5-423c-9a8f-d42977c892d3" containerID="80a8ac840c40863e509142f82abbd1eac0a87057401260cd1cf0950115419adf" exitCode=1 Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.606131 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-hh9nc" event={"ID":"1f5735dc-67e5-423c-9a8f-d42977c892d3","Type":"ContainerDied","Data":"80a8ac840c40863e509142f82abbd1eac0a87057401260cd1cf0950115419adf"} Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.606578 4769 scope.go:117] "RemoveContainer" containerID="80a8ac840c40863e509142f82abbd1eac0a87057401260cd1cf0950115419adf" Nov 25 11:08:32 crc kubenswrapper[4769]: E1125 11:08:32.606816 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=keystone-operator-controller-manager-748dc6576f-hh9nc_openstack-operators(1f5735dc-67e5-423c-9a8f-d42977c892d3)\"" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-hh9nc" podUID="1f5735dc-67e5-423c-9a8f-d42977c892d3" Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.609259 4769 generic.go:334] "Generic (PLEG): container finished" podID="88cb8ad7-c855-45eb-a471-aacb8c42082c" containerID="a23fcc71d50f18f3fe01708b8edccb8721556587c427281e3b2e049cdbb319ad" exitCode=1 Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.609299 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-kbhzd" event={"ID":"88cb8ad7-c855-45eb-a471-aacb8c42082c","Type":"ContainerDied","Data":"a23fcc71d50f18f3fe01708b8edccb8721556587c427281e3b2e049cdbb319ad"} Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.609599 4769 scope.go:117] "RemoveContainer" containerID="a23fcc71d50f18f3fe01708b8edccb8721556587c427281e3b2e049cdbb319ad" Nov 
25 11:08:32 crc kubenswrapper[4769]: E1125 11:08:32.609804 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=octavia-operator-controller-manager-fd75fd47d-kbhzd_openstack-operators(88cb8ad7-c855-45eb-a471-aacb8c42082c)\"" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-kbhzd" podUID="88cb8ad7-c855-45eb-a471-aacb8c42082c" Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.613681 4769 generic.go:334] "Generic (PLEG): container finished" podID="b90cc789-8211-48bc-85cc-1a31ad1af486" containerID="3136679e457ea57a5443a8478eb8b66b5f5380f21d180fc6949b77b7dfce4d4a" exitCode=1 Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.613768 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-zv5xr" event={"ID":"b90cc789-8211-48bc-85cc-1a31ad1af486","Type":"ContainerDied","Data":"3136679e457ea57a5443a8478eb8b66b5f5380f21d180fc6949b77b7dfce4d4a"} Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.614566 4769 scope.go:117] "RemoveContainer" containerID="3136679e457ea57a5443a8478eb8b66b5f5380f21d180fc6949b77b7dfce4d4a" Nov 25 11:08:32 crc kubenswrapper[4769]: E1125 11:08:32.614802 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=barbican-operator-controller-manager-86dc4d89c8-zv5xr_openstack-operators(b90cc789-8211-48bc-85cc-1a31ad1af486)\"" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-zv5xr" podUID="b90cc789-8211-48bc-85cc-1a31ad1af486" Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.616554 4769 generic.go:334] "Generic (PLEG): container finished" podID="1da948d8-e834-488a-a3ec-a0c0229ebaf5" containerID="19aa616ade2c53210e39d6c8147c08b8a7b9cc9666a609ef0e2a2e1b58af8099" exitCode=1 Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.616608 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-m2qxd" event={"ID":"1da948d8-e834-488a-a3ec-a0c0229ebaf5","Type":"ContainerDied","Data":"19aa616ade2c53210e39d6c8147c08b8a7b9cc9666a609ef0e2a2e1b58af8099"} Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.616935 4769 scope.go:117] "RemoveContainer" containerID="19aa616ade2c53210e39d6c8147c08b8a7b9cc9666a609ef0e2a2e1b58af8099" Nov 25 11:08:32 crc kubenswrapper[4769]: E1125 11:08:32.617174 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=horizon-operator-controller-manager-68c9694994-m2qxd_openstack-operators(1da948d8-e834-488a-a3ec-a0c0229ebaf5)\"" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-m2qxd" podUID="1da948d8-e834-488a-a3ec-a0c0229ebaf5" Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.620684 4769 generic.go:334] "Generic (PLEG): container finished" podID="aaa65e3e-75e2-4f50-b9d6-aa9710a6e394" containerID="d9746733b1af68e57c0f0b4acdb234fe445aa5b0e603e575247719cfbbf4f531" exitCode=1 Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.620733 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-2hp9n" 
event={"ID":"aaa65e3e-75e2-4f50-b9d6-aa9710a6e394","Type":"ContainerDied","Data":"d9746733b1af68e57c0f0b4acdb234fe445aa5b0e603e575247719cfbbf4f531"} Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.621545 4769 scope.go:117] "RemoveContainer" containerID="d9746733b1af68e57c0f0b4acdb234fe445aa5b0e603e575247719cfbbf4f531" Nov 25 11:08:32 crc kubenswrapper[4769]: E1125 11:08:32.621871 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=cinder-operator-controller-manager-79856dc55c-2hp9n_openstack-operators(aaa65e3e-75e2-4f50-b9d6-aa9710a6e394)\"" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-2hp9n" podUID="aaa65e3e-75e2-4f50-b9d6-aa9710a6e394" Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.626229 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-nktn6" event={"ID":"136f8f90-5673-4a08-ab4b-c030c1c428a6","Type":"ContainerStarted","Data":"9d286c2176c6173a5a0086a2d9310bcc91bcf6f63925c738b22455d7b99e8e29"} Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.626401 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-nktn6" Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.629693 4769 scope.go:117] "RemoveContainer" containerID="b91c9f589a2efc6abf9221b2ac5a1520cdfe6e110dd005fda6c245342058b4e0" Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.632110 4769 generic.go:334] "Generic (PLEG): container finished" podID="9d0ef7c9-7421-4fe2-b1c8-551253bea174" containerID="6c323d71f968e46de9e8d909661fa4e76f6ea4840e9889b4fa51e11bf032dd62" exitCode=1 Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.632200 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-fknhz" event={"ID":"9d0ef7c9-7421-4fe2-b1c8-551253bea174","Type":"ContainerDied","Data":"6c323d71f968e46de9e8d909661fa4e76f6ea4840e9889b4fa51e11bf032dd62"} Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.633197 4769 scope.go:117] "RemoveContainer" containerID="6c323d71f968e46de9e8d909661fa4e76f6ea4840e9889b4fa51e11bf032dd62" Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.636466 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cb74df96-b25q9" event={"ID":"c21f4ff5-86fa-44f0-993f-59189de57182","Type":"ContainerStarted","Data":"41d400196b844825924007670811fcafb80f49aba4f46cafb3519993c320bdfb"} Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.637144 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5cb74df96-b25q9" Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.640444 4769 generic.go:334] "Generic (PLEG): container finished" podID="1b8cd25d-43dd-4774-b1d9-59572bb6bef7" containerID="4c5e7e950f6538b39e9f699e1a741178dc60284160600dbf7845ddbf614e82cd" exitCode=1 Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.641359 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rcv4n" event={"ID":"1b8cd25d-43dd-4774-b1d9-59572bb6bef7","Type":"ContainerDied","Data":"4c5e7e950f6538b39e9f699e1a741178dc60284160600dbf7845ddbf614e82cd"} Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.642122 4769 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-47gs9" Nov 25 11:08:32 crc kubenswrapper[4769]: I1125 11:08:32.642539 4769 scope.go:117] "RemoveContainer" containerID="4c5e7e950f6538b39e9f699e1a741178dc60284160600dbf7845ddbf614e82cd" Nov 25 11:08:32 crc kubenswrapper[4769]: E1125 11:08:32.642892 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=nova-operator-controller-manager-79556f57fc-rcv4n_openstack-operators(1b8cd25d-43dd-4774-b1d9-59572bb6bef7)\"" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rcv4n" podUID="1b8cd25d-43dd-4774-b1d9-59572bb6bef7" Nov 25 11:08:33 crc kubenswrapper[4769]: I1125 11:08:33.613493 4769 scope.go:117] "RemoveContainer" containerID="e2bc390b476e8b5842c57ead30722b9aea8807b2d2505542b913b2fb7ae56e6a" Nov 25 11:08:33 crc kubenswrapper[4769]: I1125 11:08:33.657321 4769 generic.go:334] "Generic (PLEG): container finished" podID="8e7436d0-2ff7-4a11-9ab8-74a91e56de4a" containerID="1b86f4ada3b0122a9ae7cd05945e61760cc2f82de35dd3149d4fe9150e15c4cd" exitCode=1 Nov 25 11:08:33 crc kubenswrapper[4769]: I1125 11:08:33.657428 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-864885998-rz7fl" event={"ID":"8e7436d0-2ff7-4a11-9ab8-74a91e56de4a","Type":"ContainerDied","Data":"1b86f4ada3b0122a9ae7cd05945e61760cc2f82de35dd3149d4fe9150e15c4cd"} Nov 25 11:08:33 crc kubenswrapper[4769]: I1125 11:08:33.658378 4769 scope.go:117] "RemoveContainer" containerID="1b86f4ada3b0122a9ae7cd05945e61760cc2f82de35dd3149d4fe9150e15c4cd" Nov 25 11:08:33 crc kubenswrapper[4769]: I1125 11:08:33.664921 4769 generic.go:334] "Generic (PLEG): container finished" podID="b59cac8b-fb36-4316-ab83-da7202b67af5" containerID="6d68c8b35baf981f2d94ddadea913524158f563f9d15ef8038e939ee85cb6bc5" exitCode=1 Nov 25 11:08:33 crc kubenswrapper[4769]: I1125 11:08:33.665031 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-47gs9" event={"ID":"b59cac8b-fb36-4316-ab83-da7202b67af5","Type":"ContainerDied","Data":"6d68c8b35baf981f2d94ddadea913524158f563f9d15ef8038e939ee85cb6bc5"} Nov 25 11:08:33 crc kubenswrapper[4769]: I1125 11:08:33.667116 4769 scope.go:117] "RemoveContainer" containerID="6d68c8b35baf981f2d94ddadea913524158f563f9d15ef8038e939ee85cb6bc5" Nov 25 11:08:33 crc kubenswrapper[4769]: E1125 11:08:33.667495 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=ironic-operator-controller-manager-5bfcdc958c-47gs9_openstack-operators(b59cac8b-fb36-4316-ab83-da7202b67af5)\"" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-47gs9" podUID="b59cac8b-fb36-4316-ab83-da7202b67af5" Nov 25 11:08:33 crc kubenswrapper[4769]: I1125 11:08:33.680466 4769 generic.go:334] "Generic (PLEG): container finished" podID="a2f1ad69-27e4-4131-a742-a8d2c5df8636" containerID="74e51c5514768f63cf30779a9a9ff75255ce78baf6763035fc39525e117305ba" exitCode=1 Nov 25 11:08:33 crc kubenswrapper[4769]: I1125 11:08:33.680573 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-68bvf" 
event={"ID":"a2f1ad69-27e4-4131-a742-a8d2c5df8636","Type":"ContainerDied","Data":"74e51c5514768f63cf30779a9a9ff75255ce78baf6763035fc39525e117305ba"} Nov 25 11:08:33 crc kubenswrapper[4769]: I1125 11:08:33.682411 4769 scope.go:117] "RemoveContainer" containerID="74e51c5514768f63cf30779a9a9ff75255ce78baf6763035fc39525e117305ba" Nov 25 11:08:33 crc kubenswrapper[4769]: E1125 11:08:33.683012 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=ovn-operator-controller-manager-66cf5c67ff-68bvf_openstack-operators(a2f1ad69-27e4-4131-a742-a8d2c5df8636)\"" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-68bvf" podUID="a2f1ad69-27e4-4131-a742-a8d2c5df8636" Nov 25 11:08:33 crc kubenswrapper[4769]: I1125 11:08:33.690807 4769 scope.go:117] "RemoveContainer" containerID="6846c874242d9e6c04dcab0a1fe89dd686acb8a1f3b9e35b6e6e4a9c1b5f5618" Nov 25 11:08:33 crc kubenswrapper[4769]: E1125 11:08:33.691154 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=heat-operator-controller-manager-774b86978c-w8ftq_openstack-operators(67abea47-5e8a-43a2-8865-929cfdfc607c)\"" pod="openstack-operators/heat-operator-controller-manager-774b86978c-w8ftq" podUID="67abea47-5e8a-43a2-8865-929cfdfc607c" Nov 25 11:08:33 crc kubenswrapper[4769]: I1125 11:08:33.700310 4769 generic.go:334] "Generic (PLEG): container finished" podID="136f8f90-5673-4a08-ab4b-c030c1c428a6" containerID="9d286c2176c6173a5a0086a2d9310bcc91bcf6f63925c738b22455d7b99e8e29" exitCode=1 Nov 25 11:08:33 crc kubenswrapper[4769]: I1125 11:08:33.700373 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-nktn6" event={"ID":"136f8f90-5673-4a08-ab4b-c030c1c428a6","Type":"ContainerDied","Data":"9d286c2176c6173a5a0086a2d9310bcc91bcf6f63925c738b22455d7b99e8e29"} Nov 25 11:08:33 crc kubenswrapper[4769]: I1125 11:08:33.701461 4769 scope.go:117] "RemoveContainer" containerID="9d286c2176c6173a5a0086a2d9310bcc91bcf6f63925c738b22455d7b99e8e29" Nov 25 11:08:33 crc kubenswrapper[4769]: E1125 11:08:33.702136 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=infra-operator-controller-manager-d5cc86f4b-nktn6_openstack-operators(136f8f90-5673-4a08-ab4b-c030c1c428a6)\"" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-nktn6" podUID="136f8f90-5673-4a08-ab4b-c030c1c428a6" Nov 25 11:08:33 crc kubenswrapper[4769]: I1125 11:08:33.723950 4769 generic.go:334] "Generic (PLEG): container finished" podID="70f81d0a-db58-4bd4-a0e2-ee1c03e2f923" containerID="1121da9064cc527187b8a3a839889c52f4f1b9643b4d85cf4b8fd8e71179d86a" exitCode=1 Nov 25 11:08:33 crc kubenswrapper[4769]: I1125 11:08:33.724126 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-6c6f9bd7cc-cp48g" event={"ID":"70f81d0a-db58-4bd4-a0e2-ee1c03e2f923","Type":"ContainerDied","Data":"1121da9064cc527187b8a3a839889c52f4f1b9643b4d85cf4b8fd8e71179d86a"} Nov 25 11:08:33 crc kubenswrapper[4769]: I1125 11:08:33.725448 4769 scope.go:117] "RemoveContainer" containerID="1121da9064cc527187b8a3a839889c52f4f1b9643b4d85cf4b8fd8e71179d86a" Nov 25 11:08:33 crc kubenswrapper[4769]: I1125 
11:08:33.746128 4769 generic.go:334] "Generic (PLEG): container finished" podID="c1d16c0c-cca9-4794-8a52-c8674d9a069e" containerID="9debed69a3ef6fa17467a54b2c2f95fefc85a71ddae07c61e8776733e2c8d858" exitCode=1 Nov 25 11:08:33 crc kubenswrapper[4769]: I1125 11:08:33.746278 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-54cf759cb9-dcqfc" event={"ID":"c1d16c0c-cca9-4794-8a52-c8674d9a069e","Type":"ContainerDied","Data":"9debed69a3ef6fa17467a54b2c2f95fefc85a71ddae07c61e8776733e2c8d858"} Nov 25 11:08:33 crc kubenswrapper[4769]: I1125 11:08:33.747408 4769 scope.go:117] "RemoveContainer" containerID="9debed69a3ef6fa17467a54b2c2f95fefc85a71ddae07c61e8776733e2c8d858" Nov 25 11:08:33 crc kubenswrapper[4769]: I1125 11:08:33.747578 4769 scope.go:117] "RemoveContainer" containerID="4c5e7e950f6538b39e9f699e1a741178dc60284160600dbf7845ddbf614e82cd" Nov 25 11:08:33 crc kubenswrapper[4769]: E1125 11:08:33.748150 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=nova-operator-controller-manager-79556f57fc-rcv4n_openstack-operators(1b8cd25d-43dd-4774-b1d9-59572bb6bef7)\"" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rcv4n" podUID="1b8cd25d-43dd-4774-b1d9-59572bb6bef7" Nov 25 11:08:33 crc kubenswrapper[4769]: I1125 11:08:33.748245 4769 scope.go:117] "RemoveContainer" containerID="19aa616ade2c53210e39d6c8147c08b8a7b9cc9666a609ef0e2a2e1b58af8099" Nov 25 11:08:33 crc kubenswrapper[4769]: I1125 11:08:33.748508 4769 scope.go:117] "RemoveContainer" containerID="39d9302b3639f4ddffeaa35d2e8fc70b20f5c0cc9b12bbd32022210155aaad67" Nov 25 11:08:33 crc kubenswrapper[4769]: E1125 11:08:33.748929 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=manila-operator-controller-manager-58bb8d67cc-qdjsd_openstack-operators(63e66921-47a7-407a-b50e-06cf5cadb8be)\"" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-qdjsd" podUID="63e66921-47a7-407a-b50e-06cf5cadb8be" Nov 25 11:08:33 crc kubenswrapper[4769]: E1125 11:08:33.748946 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=horizon-operator-controller-manager-68c9694994-m2qxd_openstack-operators(1da948d8-e834-488a-a3ec-a0c0229ebaf5)\"" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-m2qxd" podUID="1da948d8-e834-488a-a3ec-a0c0229ebaf5" Nov 25 11:08:33 crc kubenswrapper[4769]: I1125 11:08:33.749237 4769 scope.go:117] "RemoveContainer" containerID="80a8ac840c40863e509142f82abbd1eac0a87057401260cd1cf0950115419adf" Nov 25 11:08:33 crc kubenswrapper[4769]: E1125 11:08:33.749486 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=keystone-operator-controller-manager-748dc6576f-hh9nc_openstack-operators(1f5735dc-67e5-423c-9a8f-d42977c892d3)\"" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-hh9nc" podUID="1f5735dc-67e5-423c-9a8f-d42977c892d3" Nov 25 11:08:33 crc kubenswrapper[4769]: I1125 11:08:33.750060 4769 scope.go:117] "RemoveContainer" 
containerID="d9746733b1af68e57c0f0b4acdb234fe445aa5b0e603e575247719cfbbf4f531" Nov 25 11:08:33 crc kubenswrapper[4769]: E1125 11:08:33.750269 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=cinder-operator-controller-manager-79856dc55c-2hp9n_openstack-operators(aaa65e3e-75e2-4f50-b9d6-aa9710a6e394)\"" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-2hp9n" podUID="aaa65e3e-75e2-4f50-b9d6-aa9710a6e394" Nov 25 11:08:33 crc kubenswrapper[4769]: I1125 11:08:33.750528 4769 scope.go:117] "RemoveContainer" containerID="a23fcc71d50f18f3fe01708b8edccb8721556587c427281e3b2e049cdbb319ad" Nov 25 11:08:33 crc kubenswrapper[4769]: E1125 11:08:33.750737 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=octavia-operator-controller-manager-fd75fd47d-kbhzd_openstack-operators(88cb8ad7-c855-45eb-a471-aacb8c42082c)\"" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-kbhzd" podUID="88cb8ad7-c855-45eb-a471-aacb8c42082c" Nov 25 11:08:33 crc kubenswrapper[4769]: I1125 11:08:33.780724 4769 scope.go:117] "RemoveContainer" containerID="e17e389fbb7c359261990f8efde39476099fe9c541d058445d5c07710d2cdbad" Nov 25 11:08:34 crc kubenswrapper[4769]: I1125 11:08:34.075232 4769 scope.go:117] "RemoveContainer" containerID="396c9684073cbf9cfbac8196a83452dc17e63a68e71e196a5bd3cfd3d00a3ff8" Nov 25 11:08:34 crc kubenswrapper[4769]: I1125 11:08:34.184361 4769 scope.go:117] "RemoveContainer" containerID="ac289fa8350a70a6ad5e0d6a6e2d17b36cce60a1cca3f86da81a58a9be17bc34" Nov 25 11:08:34 crc kubenswrapper[4769]: I1125 11:08:34.365369 4769 scope.go:117] "RemoveContainer" containerID="946ca39c4af07e3b66da42eb0fe6d94359578b66083e6fe55433cc33fabbf671" Nov 25 11:08:34 crc kubenswrapper[4769]: I1125 11:08:34.429504 4769 scope.go:117] "RemoveContainer" containerID="7c3b83fb330e4e91f8d36af736ddc4b23b1e75845f4ea765e11ec77d28f9619c" Nov 25 11:08:34 crc kubenswrapper[4769]: I1125 11:08:34.472020 4769 scope.go:117] "RemoveContainer" containerID="ed06562448baa51835a8cf1f3e807c9075497978a76b10bd16176ef8d2f1f3b4" Nov 25 11:08:34 crc kubenswrapper[4769]: I1125 11:08:34.577165 4769 scope.go:117] "RemoveContainer" containerID="1a9b8362a3d604343b360e638d70bb8430a8d47fbf9df6e616d5d01ebe8d8afd" Nov 25 11:08:34 crc kubenswrapper[4769]: I1125 11:08:34.622771 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators-redhat/loki-operator-controller-manager-cbd48d4d7-8psr6" Nov 25 11:08:34 crc kubenswrapper[4769]: I1125 11:08:34.717742 4769 scope.go:117] "RemoveContainer" containerID="00477b785738787aa94d9a777223971820e93dafb0bf8688e595b5cf3205784e" Nov 25 11:08:34 crc kubenswrapper[4769]: I1125 11:08:34.786913 4769 scope.go:117] "RemoveContainer" containerID="6d68c8b35baf981f2d94ddadea913524158f563f9d15ef8038e939ee85cb6bc5" Nov 25 11:08:34 crc kubenswrapper[4769]: E1125 11:08:34.787220 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=ironic-operator-controller-manager-5bfcdc958c-47gs9_openstack-operators(b59cac8b-fb36-4316-ab83-da7202b67af5)\"" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-47gs9" podUID="b59cac8b-fb36-4316-ab83-da7202b67af5" Nov 25 
11:08:34 crc kubenswrapper[4769]: I1125 11:08:34.808370 4769 scope.go:117] "RemoveContainer" containerID="3a1d89eab4ebc4eb141327b74163cc1f4e865b034ffb2645df93f4aef500b974" Nov 25 11:08:34 crc kubenswrapper[4769]: I1125 11:08:34.812273 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-864885998-rz7fl" event={"ID":"8e7436d0-2ff7-4a11-9ab8-74a91e56de4a","Type":"ContainerStarted","Data":"75a8806d784335960667ff2525b8d068924d809e301c59b9cebdb36bc3c50322"} Nov 25 11:08:34 crc kubenswrapper[4769]: I1125 11:08:34.814254 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-864885998-rz7fl" Nov 25 11:08:34 crc kubenswrapper[4769]: I1125 11:08:34.851298 4769 generic.go:334] "Generic (PLEG): container finished" podID="4894deb0-65ca-4b42-b397-4092a75739c9" containerID="c1e899e7db8749175807d7e9689a02703d96cd41a40b06f86768cd9bc5c54225" exitCode=1 Nov 25 11:08:34 crc kubenswrapper[4769]: I1125 11:08:34.851360 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-k2htr" event={"ID":"4894deb0-65ca-4b42-b397-4092a75739c9","Type":"ContainerDied","Data":"c1e899e7db8749175807d7e9689a02703d96cd41a40b06f86768cd9bc5c54225"} Nov 25 11:08:34 crc kubenswrapper[4769]: I1125 11:08:34.852343 4769 scope.go:117] "RemoveContainer" containerID="c1e899e7db8749175807d7e9689a02703d96cd41a40b06f86768cd9bc5c54225" Nov 25 11:08:34 crc kubenswrapper[4769]: E1125 11:08:34.852717 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=glance-operator-controller-manager-68b95954c9-k2htr_openstack-operators(4894deb0-65ca-4b42-b397-4092a75739c9)\"" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-k2htr" podUID="4894deb0-65ca-4b42-b397-4092a75739c9" Nov 25 11:08:34 crc kubenswrapper[4769]: I1125 11:08:34.859452 4769 generic.go:334] "Generic (PLEG): container finished" podID="0f8a8be0-04e6-4ef8-b3ce-fb6ced595cbd" containerID="0fd74ffbac90ad0f5f9e8b7c88317416a57234b04509525e230ab6f94b0adc83" exitCode=1 Nov 25 11:08:34 crc kubenswrapper[4769]: I1125 11:08:34.859520 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7xcjk" event={"ID":"0f8a8be0-04e6-4ef8-b3ce-fb6ced595cbd","Type":"ContainerDied","Data":"0fd74ffbac90ad0f5f9e8b7c88317416a57234b04509525e230ab6f94b0adc83"} Nov 25 11:08:34 crc kubenswrapper[4769]: I1125 11:08:34.860376 4769 scope.go:117] "RemoveContainer" containerID="0fd74ffbac90ad0f5f9e8b7c88317416a57234b04509525e230ab6f94b0adc83" Nov 25 11:08:34 crc kubenswrapper[4769]: E1125 11:08:34.860719 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=designate-operator-controller-manager-7d695c9b56-7xcjk_openstack-operators(0f8a8be0-04e6-4ef8-b3ce-fb6ced595cbd)\"" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7xcjk" podUID="0f8a8be0-04e6-4ef8-b3ce-fb6ced595cbd" Nov 25 11:08:34 crc kubenswrapper[4769]: I1125 11:08:34.863041 4769 generic.go:334] "Generic (PLEG): container finished" podID="9d0ef7c9-7421-4fe2-b1c8-551253bea174" containerID="8154a6c938b49fa33eff62181bdd315b08ce8beb4a1fe0b024ac73bf728240ee" exitCode=1 Nov 25 11:08:34 crc 
kubenswrapper[4769]: I1125 11:08:34.863103 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-fknhz" event={"ID":"9d0ef7c9-7421-4fe2-b1c8-551253bea174","Type":"ContainerDied","Data":"8154a6c938b49fa33eff62181bdd315b08ce8beb4a1fe0b024ac73bf728240ee"} Nov 25 11:08:34 crc kubenswrapper[4769]: I1125 11:08:34.863624 4769 scope.go:117] "RemoveContainer" containerID="8154a6c938b49fa33eff62181bdd315b08ce8beb4a1fe0b024ac73bf728240ee" Nov 25 11:08:34 crc kubenswrapper[4769]: E1125 11:08:34.863987 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=mariadb-operator-controller-manager-cb6c4fdb7-fknhz_openstack-operators(9d0ef7c9-7421-4fe2-b1c8-551253bea174)\"" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-fknhz" podUID="9d0ef7c9-7421-4fe2-b1c8-551253bea174" Nov 25 11:08:34 crc kubenswrapper[4769]: I1125 11:08:34.867889 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-6c6f9bd7cc-cp48g" event={"ID":"70f81d0a-db58-4bd4-a0e2-ee1c03e2f923","Type":"ContainerStarted","Data":"bc469b6674d8415450a3a7ac3d780f092d9fec2269ea54e1291e18eff7c1f0b4"} Nov 25 11:08:34 crc kubenswrapper[4769]: I1125 11:08:34.868339 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-6c6f9bd7cc-cp48g" Nov 25 11:08:34 crc kubenswrapper[4769]: I1125 11:08:34.874655 4769 scope.go:117] "RemoveContainer" containerID="9d286c2176c6173a5a0086a2d9310bcc91bcf6f63925c738b22455d7b99e8e29" Nov 25 11:08:34 crc kubenswrapper[4769]: I1125 11:08:34.874857 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-54cf759cb9-dcqfc" event={"ID":"c1d16c0c-cca9-4794-8a52-c8674d9a069e","Type":"ContainerStarted","Data":"23d5359c8568a3cd19ca6542473ddedef1de346a9c83e627f43183311089f7da"} Nov 25 11:08:34 crc kubenswrapper[4769]: I1125 11:08:34.875527 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-54cf759cb9-dcqfc" Nov 25 11:08:34 crc kubenswrapper[4769]: I1125 11:08:34.875923 4769 scope.go:117] "RemoveContainer" containerID="74e51c5514768f63cf30779a9a9ff75255ce78baf6763035fc39525e117305ba" Nov 25 11:08:34 crc kubenswrapper[4769]: E1125 11:08:34.876221 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=ovn-operator-controller-manager-66cf5c67ff-68bvf_openstack-operators(a2f1ad69-27e4-4131-a742-a8d2c5df8636)\"" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-68bvf" podUID="a2f1ad69-27e4-4131-a742-a8d2c5df8636" Nov 25 11:08:34 crc kubenswrapper[4769]: E1125 11:08:34.876621 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=infra-operator-controller-manager-d5cc86f4b-nktn6_openstack-operators(136f8f90-5673-4a08-ab4b-c030c1c428a6)\"" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-nktn6" podUID="136f8f90-5673-4a08-ab4b-c030c1c428a6" Nov 25 11:08:34 crc kubenswrapper[4769]: I1125 11:08:34.953530 4769 scope.go:117] "RemoveContainer" 
containerID="2a082f7c1ba5b377707c21505ffb93a704670d0c971db75262950820a1188ed5" Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.000699 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-zv5xr" Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.001539 4769 scope.go:117] "RemoveContainer" containerID="3136679e457ea57a5443a8478eb8b66b5f5380f21d180fc6949b77b7dfce4d4a" Nov 25 11:08:35 crc kubenswrapper[4769]: E1125 11:08:35.001942 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=barbican-operator-controller-manager-86dc4d89c8-zv5xr_openstack-operators(b90cc789-8211-48bc-85cc-1a31ad1af486)\"" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-zv5xr" podUID="b90cc789-8211-48bc-85cc-1a31ad1af486" Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.002649 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-zv5xr" Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.009578 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-2hp9n" Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.010461 4769 scope.go:117] "RemoveContainer" containerID="d9746733b1af68e57c0f0b4acdb234fe445aa5b0e603e575247719cfbbf4f531" Nov 25 11:08:35 crc kubenswrapper[4769]: E1125 11:08:35.010791 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=cinder-operator-controller-manager-79856dc55c-2hp9n_openstack-operators(aaa65e3e-75e2-4f50-b9d6-aa9710a6e394)\"" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-2hp9n" podUID="aaa65e3e-75e2-4f50-b9d6-aa9710a6e394" Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.057741 4769 scope.go:117] "RemoveContainer" containerID="6c148f7c3df14f45a0d8f4337fe0cdd97c4a74585964db421c37af0e242000af" Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.086063 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7xcjk" Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.086129 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7xcjk" Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.131666 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-k2htr" Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.131720 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-k2htr" Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.191354 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/heat-operator-controller-manager-774b86978c-w8ftq" Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.192249 4769 scope.go:117] "RemoveContainer" containerID="6846c874242d9e6c04dcab0a1fe89dd686acb8a1f3b9e35b6e6e4a9c1b5f5618" Nov 25 11:08:35 crc kubenswrapper[4769]: E1125 
11:08:35.192574 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=heat-operator-controller-manager-774b86978c-w8ftq_openstack-operators(67abea47-5e8a-43a2-8865-929cfdfc607c)\"" pod="openstack-operators/heat-operator-controller-manager-774b86978c-w8ftq" podUID="67abea47-5e8a-43a2-8865-929cfdfc607c" Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.232647 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-m2qxd" Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.233718 4769 scope.go:117] "RemoveContainer" containerID="19aa616ade2c53210e39d6c8147c08b8a7b9cc9666a609ef0e2a2e1b58af8099" Nov 25 11:08:35 crc kubenswrapper[4769]: E1125 11:08:35.234105 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=horizon-operator-controller-manager-68c9694994-m2qxd_openstack-operators(1da948d8-e834-488a-a3ec-a0c0229ebaf5)\"" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-m2qxd" podUID="1da948d8-e834-488a-a3ec-a0c0229ebaf5" Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.462493 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-47gs9" Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.505747 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-hh9nc" Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.506734 4769 scope.go:117] "RemoveContainer" containerID="80a8ac840c40863e509142f82abbd1eac0a87057401260cd1cf0950115419adf" Nov 25 11:08:35 crc kubenswrapper[4769]: E1125 11:08:35.507082 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=keystone-operator-controller-manager-748dc6576f-hh9nc_openstack-operators(1f5735dc-67e5-423c-9a8f-d42977c892d3)\"" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-hh9nc" podUID="1f5735dc-67e5-423c-9a8f-d42977c892d3" Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.556767 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-qdjsd" Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.557801 4769 scope.go:117] "RemoveContainer" containerID="39d9302b3639f4ddffeaa35d2e8fc70b20f5c0cc9b12bbd32022210155aaad67" Nov 25 11:08:35 crc kubenswrapper[4769]: E1125 11:08:35.559402 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=manila-operator-controller-manager-58bb8d67cc-qdjsd_openstack-operators(63e66921-47a7-407a-b50e-06cf5cadb8be)\"" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-qdjsd" podUID="63e66921-47a7-407a-b50e-06cf5cadb8be" Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.603069 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-fknhz" Nov 25 11:08:35 crc kubenswrapper[4769]: 
I1125 11:08:35.603116 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-fknhz" Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.671851 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/kube-state-metrics-0" podUID="f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9" containerName="kube-state-metrics" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.671939 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/kube-state-metrics-0" Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.673107 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="kube-state-metrics" containerStatusID={"Type":"cri-o","ID":"39fdd3d49d95b573499934de095fcfcb318fa3d8283bb175fb0e3bf40cc83179"} pod="openstack/kube-state-metrics-0" containerMessage="Container kube-state-metrics failed liveness probe, will be restarted" Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.673153 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9" containerName="kube-state-metrics" containerID="cri-o://39fdd3d49d95b573499934de095fcfcb318fa3d8283bb175fb0e3bf40cc83179" gracePeriod=30 Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.693944 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-rgn2z" Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.694013 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-rgn2z" Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.695116 4769 scope.go:117] "RemoveContainer" containerID="93feba38754e7e7cf6fd7ce94bfbe24770aa18f243ce1c1b5ddd2f51902c5ec9" Nov 25 11:08:35 crc kubenswrapper[4769]: E1125 11:08:35.695403 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=neutron-operator-controller-manager-7c57c8bbc4-rgn2z_openstack-operators(6d9be953-34ea-4956-96bf-84d5f8babb2d)\"" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-rgn2z" podUID="6d9be953-34ea-4956-96bf-84d5f8babb2d" Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.727755 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rcv4n" Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.728578 4769 scope.go:117] "RemoveContainer" containerID="4c5e7e950f6538b39e9f699e1a741178dc60284160600dbf7845ddbf614e82cd" Nov 25 11:08:35 crc kubenswrapper[4769]: E1125 11:08:35.728840 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=nova-operator-controller-manager-79556f57fc-rcv4n_openstack-operators(1b8cd25d-43dd-4774-b1d9-59572bb6bef7)\"" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rcv4n" podUID="1b8cd25d-43dd-4774-b1d9-59572bb6bef7" Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.818433 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" 
pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-kbhzd" Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.819403 4769 scope.go:117] "RemoveContainer" containerID="a23fcc71d50f18f3fe01708b8edccb8721556587c427281e3b2e049cdbb319ad" Nov 25 11:08:35 crc kubenswrapper[4769]: E1125 11:08:35.820493 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=octavia-operator-controller-manager-fd75fd47d-kbhzd_openstack-operators(88cb8ad7-c855-45eb-a471-aacb8c42082c)\"" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-kbhzd" podUID="88cb8ad7-c855-45eb-a471-aacb8c42082c" Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.868581 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-68bvf" Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.893181 4769 generic.go:334] "Generic (PLEG): container finished" podID="f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9" containerID="39fdd3d49d95b573499934de095fcfcb318fa3d8283bb175fb0e3bf40cc83179" exitCode=2 Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.893265 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9","Type":"ContainerDied","Data":"39fdd3d49d95b573499934de095fcfcb318fa3d8283bb175fb0e3bf40cc83179"} Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.895948 4769 scope.go:117] "RemoveContainer" containerID="c1e899e7db8749175807d7e9689a02703d96cd41a40b06f86768cd9bc5c54225" Nov 25 11:08:35 crc kubenswrapper[4769]: E1125 11:08:35.896294 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=glance-operator-controller-manager-68b95954c9-k2htr_openstack-operators(4894deb0-65ca-4b42-b397-4092a75739c9)\"" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-k2htr" podUID="4894deb0-65ca-4b42-b397-4092a75739c9" Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.900648 4769 scope.go:117] "RemoveContainer" containerID="74e51c5514768f63cf30779a9a9ff75255ce78baf6763035fc39525e117305ba" Nov 25 11:08:35 crc kubenswrapper[4769]: E1125 11:08:35.901036 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=ovn-operator-controller-manager-66cf5c67ff-68bvf_openstack-operators(a2f1ad69-27e4-4131-a742-a8d2c5df8636)\"" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-68bvf" podUID="a2f1ad69-27e4-4131-a742-a8d2c5df8636" Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.904828 4769 generic.go:334] "Generic (PLEG): container finished" podID="70f81d0a-db58-4bd4-a0e2-ee1c03e2f923" containerID="bc469b6674d8415450a3a7ac3d780f092d9fec2269ea54e1291e18eff7c1f0b4" exitCode=1 Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.905319 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-6c6f9bd7cc-cp48g" event={"ID":"70f81d0a-db58-4bd4-a0e2-ee1c03e2f923","Type":"ContainerDied","Data":"bc469b6674d8415450a3a7ac3d780f092d9fec2269ea54e1291e18eff7c1f0b4"} Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.906864 4769 scope.go:117] "RemoveContainer" 
containerID="bc469b6674d8415450a3a7ac3d780f092d9fec2269ea54e1291e18eff7c1f0b4" Nov 25 11:08:35 crc kubenswrapper[4769]: E1125 11:08:35.907244 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=openstack-operator-controller-manager-6c6f9bd7cc-cp48g_openstack-operators(70f81d0a-db58-4bd4-a0e2-ee1c03e2f923)\"" pod="openstack-operators/openstack-operator-controller-manager-6c6f9bd7cc-cp48g" podUID="70f81d0a-db58-4bd4-a0e2-ee1c03e2f923" Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.913998 4769 generic.go:334] "Generic (PLEG): container finished" podID="c1d16c0c-cca9-4794-8a52-c8674d9a069e" containerID="23d5359c8568a3cd19ca6542473ddedef1de346a9c83e627f43183311089f7da" exitCode=1 Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.914071 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-54cf759cb9-dcqfc" event={"ID":"c1d16c0c-cca9-4794-8a52-c8674d9a069e","Type":"ContainerDied","Data":"23d5359c8568a3cd19ca6542473ddedef1de346a9c83e627f43183311089f7da"} Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.914901 4769 scope.go:117] "RemoveContainer" containerID="23d5359c8568a3cd19ca6542473ddedef1de346a9c83e627f43183311089f7da" Nov 25 11:08:35 crc kubenswrapper[4769]: E1125 11:08:35.915276 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=telemetry-operator-controller-manager-54cf759cb9-dcqfc_openstack-operators(c1d16c0c-cca9-4794-8a52-c8674d9a069e)\"" pod="openstack-operators/telemetry-operator-controller-manager-54cf759cb9-dcqfc" podUID="c1d16c0c-cca9-4794-8a52-c8674d9a069e" Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.917522 4769 generic.go:334] "Generic (PLEG): container finished" podID="8e7436d0-2ff7-4a11-9ab8-74a91e56de4a" containerID="75a8806d784335960667ff2525b8d068924d809e301c59b9cebdb36bc3c50322" exitCode=1 Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.918440 4769 scope.go:117] "RemoveContainer" containerID="3136679e457ea57a5443a8478eb8b66b5f5380f21d180fc6949b77b7dfce4d4a" Nov 25 11:08:35 crc kubenswrapper[4769]: E1125 11:08:35.918736 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=barbican-operator-controller-manager-86dc4d89c8-zv5xr_openstack-operators(b90cc789-8211-48bc-85cc-1a31ad1af486)\"" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-zv5xr" podUID="b90cc789-8211-48bc-85cc-1a31ad1af486" Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.919076 4769 scope.go:117] "RemoveContainer" containerID="75a8806d784335960667ff2525b8d068924d809e301c59b9cebdb36bc3c50322" Nov 25 11:08:35 crc kubenswrapper[4769]: E1125 11:08:35.919347 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=watcher-operator-controller-manager-864885998-rz7fl_openstack-operators(8e7436d0-2ff7-4a11-9ab8-74a91e56de4a)\"" pod="openstack-operators/watcher-operator-controller-manager-864885998-rz7fl" podUID="8e7436d0-2ff7-4a11-9ab8-74a91e56de4a" Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.919384 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/watcher-operator-controller-manager-864885998-rz7fl" event={"ID":"8e7436d0-2ff7-4a11-9ab8-74a91e56de4a","Type":"ContainerDied","Data":"75a8806d784335960667ff2525b8d068924d809e301c59b9cebdb36bc3c50322"} Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.919798 4769 scope.go:117] "RemoveContainer" containerID="6d68c8b35baf981f2d94ddadea913524158f563f9d15ef8038e939ee85cb6bc5" Nov 25 11:08:35 crc kubenswrapper[4769]: E1125 11:08:35.920028 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=ironic-operator-controller-manager-5bfcdc958c-47gs9_openstack-operators(b59cac8b-fb36-4316-ab83-da7202b67af5)\"" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-47gs9" podUID="b59cac8b-fb36-4316-ab83-da7202b67af5" Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.920353 4769 scope.go:117] "RemoveContainer" containerID="8154a6c938b49fa33eff62181bdd315b08ce8beb4a1fe0b024ac73bf728240ee" Nov 25 11:08:35 crc kubenswrapper[4769]: E1125 11:08:35.920550 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=mariadb-operator-controller-manager-cb6c4fdb7-fknhz_openstack-operators(9d0ef7c9-7421-4fe2-b1c8-551253bea174)\"" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-fknhz" podUID="9d0ef7c9-7421-4fe2-b1c8-551253bea174" Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.920829 4769 scope.go:117] "RemoveContainer" containerID="0fd74ffbac90ad0f5f9e8b7c88317416a57234b04509525e230ab6f94b0adc83" Nov 25 11:08:35 crc kubenswrapper[4769]: E1125 11:08:35.921532 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=designate-operator-controller-manager-7d695c9b56-7xcjk_openstack-operators(0f8a8be0-04e6-4ef8-b3ce-fb6ced595cbd)\"" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7xcjk" podUID="0f8a8be0-04e6-4ef8-b3ce-fb6ced595cbd" Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.921616 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-g8xqz" Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.921660 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-g8xqz" Nov 25 11:08:35 crc kubenswrapper[4769]: I1125 11:08:35.922614 4769 scope.go:117] "RemoveContainer" containerID="935b97555d27421b14befc4592ae226765148116fac79ea0f3e09138aa321842" Nov 25 11:08:35 crc kubenswrapper[4769]: E1125 11:08:35.922953 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=placement-operator-controller-manager-5db546f9d9-g8xqz_openstack-operators(1be22f03-8697-413b-922c-9344185c05c4)\"" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-g8xqz" podUID="1be22f03-8697-413b-922c-9344185c05c4" Nov 25 11:08:36 crc kubenswrapper[4769]: I1125 11:08:36.047459 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wssdn" Nov 25 11:08:36 crc 
kubenswrapper[4769]: I1125 11:08:36.048413 4769 scope.go:117] "RemoveContainer" containerID="32e2745bbc5cdad2789665569e0dc2773da3459d31ff638abb7caa8d8eba887b" Nov 25 11:08:36 crc kubenswrapper[4769]: E1125 11:08:36.048720 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=swift-operator-controller-manager-6fdc4fcf86-wssdn_openstack-operators(18e1910e-52b2-439b-a93f-4ffe63a7b992)\"" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wssdn" podUID="18e1910e-52b2-439b-a93f-4ffe63a7b992" Nov 25 11:08:36 crc kubenswrapper[4769]: I1125 11:08:36.050728 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wssdn" Nov 25 11:08:36 crc kubenswrapper[4769]: I1125 11:08:36.150080 4769 scope.go:117] "RemoveContainer" containerID="6c323d71f968e46de9e8d909661fa4e76f6ea4840e9889b4fa51e11bf032dd62" Nov 25 11:08:36 crc kubenswrapper[4769]: I1125 11:08:36.190470 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/telemetry-operator-controller-manager-54cf759cb9-dcqfc" Nov 25 11:08:36 crc kubenswrapper[4769]: I1125 11:08:36.250846 4769 scope.go:117] "RemoveContainer" containerID="1121da9064cc527187b8a3a839889c52f4f1b9643b4d85cf4b8fd8e71179d86a" Nov 25 11:08:36 crc kubenswrapper[4769]: I1125 11:08:36.336477 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/watcher-operator-controller-manager-864885998-rz7fl" Nov 25 11:08:36 crc kubenswrapper[4769]: I1125 11:08:36.382544 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-5cb74df96-b25q9" Nov 25 11:08:36 crc kubenswrapper[4769]: I1125 11:08:36.408917 4769 scope.go:117] "RemoveContainer" containerID="9debed69a3ef6fa17467a54b2c2f95fefc85a71ddae07c61e8776733e2c8d858" Nov 25 11:08:36 crc kubenswrapper[4769]: I1125 11:08:36.561283 4769 scope.go:117] "RemoveContainer" containerID="1b86f4ada3b0122a9ae7cd05945e61760cc2f82de35dd3149d4fe9150e15c4cd" Nov 25 11:08:36 crc kubenswrapper[4769]: I1125 11:08:36.816894 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-nktn6" Nov 25 11:08:36 crc kubenswrapper[4769]: I1125 11:08:36.817814 4769 scope.go:117] "RemoveContainer" containerID="9d286c2176c6173a5a0086a2d9310bcc91bcf6f63925c738b22455d7b99e8e29" Nov 25 11:08:36 crc kubenswrapper[4769]: E1125 11:08:36.818078 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=infra-operator-controller-manager-d5cc86f4b-nktn6_openstack-operators(136f8f90-5673-4a08-ab4b-c030c1c428a6)\"" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-nktn6" podUID="136f8f90-5673-4a08-ab4b-c030c1c428a6" Nov 25 11:08:36 crc kubenswrapper[4769]: I1125 11:08:36.850449 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 11:08:36 crc kubenswrapper[4769]: I1125 11:08:36.934875 4769 scope.go:117] "RemoveContainer" containerID="bc469b6674d8415450a3a7ac3d780f092d9fec2269ea54e1291e18eff7c1f0b4" Nov 25 11:08:36 crc kubenswrapper[4769]: E1125 11:08:36.935434 4769 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=openstack-operator-controller-manager-6c6f9bd7cc-cp48g_openstack-operators(70f81d0a-db58-4bd4-a0e2-ee1c03e2f923)\"" pod="openstack-operators/openstack-operator-controller-manager-6c6f9bd7cc-cp48g" podUID="70f81d0a-db58-4bd4-a0e2-ee1c03e2f923" Nov 25 11:08:36 crc kubenswrapper[4769]: I1125 11:08:36.945446 4769 scope.go:117] "RemoveContainer" containerID="23d5359c8568a3cd19ca6542473ddedef1de346a9c83e627f43183311089f7da" Nov 25 11:08:36 crc kubenswrapper[4769]: E1125 11:08:36.945808 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=telemetry-operator-controller-manager-54cf759cb9-dcqfc_openstack-operators(c1d16c0c-cca9-4794-8a52-c8674d9a069e)\"" pod="openstack-operators/telemetry-operator-controller-manager-54cf759cb9-dcqfc" podUID="c1d16c0c-cca9-4794-8a52-c8674d9a069e" Nov 25 11:08:36 crc kubenswrapper[4769]: I1125 11:08:36.948113 4769 scope.go:117] "RemoveContainer" containerID="75a8806d784335960667ff2525b8d068924d809e301c59b9cebdb36bc3c50322" Nov 25 11:08:36 crc kubenswrapper[4769]: E1125 11:08:36.948438 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=watcher-operator-controller-manager-864885998-rz7fl_openstack-operators(8e7436d0-2ff7-4a11-9ab8-74a91e56de4a)\"" pod="openstack-operators/watcher-operator-controller-manager-864885998-rz7fl" podUID="8e7436d0-2ff7-4a11-9ab8-74a91e56de4a" Nov 25 11:08:36 crc kubenswrapper[4769]: I1125 11:08:36.950469 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9","Type":"ContainerStarted","Data":"1d5b649be099053b04184140764d8cafa45a2baf51ef02bcd7e92be816615864"} Nov 25 11:08:36 crc kubenswrapper[4769]: I1125 11:08:36.950595 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 25 11:08:36 crc kubenswrapper[4769]: I1125 11:08:36.955370 4769 scope.go:117] "RemoveContainer" containerID="32e2745bbc5cdad2789665569e0dc2773da3459d31ff638abb7caa8d8eba887b" Nov 25 11:08:36 crc kubenswrapper[4769]: E1125 11:08:36.955621 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=swift-operator-controller-manager-6fdc4fcf86-wssdn_openstack-operators(18e1910e-52b2-439b-a93f-4ffe63a7b992)\"" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wssdn" podUID="18e1910e-52b2-439b-a93f-4ffe63a7b992" Nov 25 11:08:37 crc kubenswrapper[4769]: I1125 11:08:37.236681 4769 scope.go:117] "RemoveContainer" containerID="4e20ea5973bec117853583cca2f6ed9ac917d292b5b14f4ca82a8a2adaf6b30b" Nov 25 11:08:37 crc kubenswrapper[4769]: I1125 11:08:37.405482 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 25 11:08:37 crc kubenswrapper[4769]: I1125 11:08:37.615065 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 25 11:08:37 crc kubenswrapper[4769]: I1125 11:08:37.966438 4769 generic.go:334] "Generic (PLEG): container finished" 
podID="9441dbc7-716c-413e-b0ea-bf1ef05b1608" containerID="f682d6ba2b3a76e5b33560b16ccf1b9209e6341d4926ea2e556ef1f28310d774" exitCode=1 Nov 25 11:08:37 crc kubenswrapper[4769]: I1125 11:08:37.966502 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-64569bb78d-pzqdq" event={"ID":"9441dbc7-716c-413e-b0ea-bf1ef05b1608","Type":"ContainerDied","Data":"f682d6ba2b3a76e5b33560b16ccf1b9209e6341d4926ea2e556ef1f28310d774"} Nov 25 11:08:37 crc kubenswrapper[4769]: I1125 11:08:37.966846 4769 scope.go:117] "RemoveContainer" containerID="4e20ea5973bec117853583cca2f6ed9ac917d292b5b14f4ca82a8a2adaf6b30b" Nov 25 11:08:37 crc kubenswrapper[4769]: I1125 11:08:37.968090 4769 scope.go:117] "RemoveContainer" containerID="f682d6ba2b3a76e5b33560b16ccf1b9209e6341d4926ea2e556ef1f28310d774" Nov 25 11:08:37 crc kubenswrapper[4769]: E1125 11:08:37.968748 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=metallb-operator-controller-manager-64569bb78d-pzqdq_metallb-system(9441dbc7-716c-413e-b0ea-bf1ef05b1608)\"" pod="metallb-system/metallb-operator-controller-manager-64569bb78d-pzqdq" podUID="9441dbc7-716c-413e-b0ea-bf1ef05b1608" Nov 25 11:08:37 crc kubenswrapper[4769]: I1125 11:08:37.969915 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Nov 25 11:08:37 crc kubenswrapper[4769]: I1125 11:08:37.970576 4769 generic.go:334] "Generic (PLEG): container finished" podID="f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9" containerID="1d5b649be099053b04184140764d8cafa45a2baf51ef02bcd7e92be816615864" exitCode=1 Nov 25 11:08:37 crc kubenswrapper[4769]: I1125 11:08:37.971393 4769 scope.go:117] "RemoveContainer" containerID="23d5359c8568a3cd19ca6542473ddedef1de346a9c83e627f43183311089f7da" Nov 25 11:08:37 crc kubenswrapper[4769]: E1125 11:08:37.971709 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=telemetry-operator-controller-manager-54cf759cb9-dcqfc_openstack-operators(c1d16c0c-cca9-4794-8a52-c8674d9a069e)\"" pod="openstack-operators/telemetry-operator-controller-manager-54cf759cb9-dcqfc" podUID="c1d16c0c-cca9-4794-8a52-c8674d9a069e" Nov 25 11:08:37 crc kubenswrapper[4769]: I1125 11:08:37.972091 4769 scope.go:117] "RemoveContainer" containerID="1d5b649be099053b04184140764d8cafa45a2baf51ef02bcd7e92be816615864" Nov 25 11:08:37 crc kubenswrapper[4769]: I1125 11:08:37.972425 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9","Type":"ContainerDied","Data":"1d5b649be099053b04184140764d8cafa45a2baf51ef02bcd7e92be816615864"} Nov 25 11:08:37 crc kubenswrapper[4769]: I1125 11:08:37.972807 4769 scope.go:117] "RemoveContainer" containerID="75a8806d784335960667ff2525b8d068924d809e301c59b9cebdb36bc3c50322" Nov 25 11:08:37 crc kubenswrapper[4769]: E1125 11:08:37.973098 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=watcher-operator-controller-manager-864885998-rz7fl_openstack-operators(8e7436d0-2ff7-4a11-9ab8-74a91e56de4a)\"" pod="openstack-operators/watcher-operator-controller-manager-864885998-rz7fl" podUID="8e7436d0-2ff7-4a11-9ab8-74a91e56de4a" Nov 25 
11:08:38 crc kubenswrapper[4769]: I1125 11:08:38.044089 4769 scope.go:117] "RemoveContainer" containerID="39fdd3d49d95b573499934de095fcfcb318fa3d8283bb175fb0e3bf40cc83179" Nov 25 11:08:38 crc kubenswrapper[4769]: I1125 11:08:38.181220 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-66n2x" Nov 25 11:08:38 crc kubenswrapper[4769]: I1125 11:08:38.224522 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"cluster-logging-operator-dockercfg-qzz5h" Nov 25 11:08:38 crc kubenswrapper[4769]: I1125 11:08:38.327707 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-64569bb78d-pzqdq" Nov 25 11:08:38 crc kubenswrapper[4769]: I1125 11:08:38.647076 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 25 11:08:38 crc kubenswrapper[4769]: I1125 11:08:38.996031 4769 scope.go:117] "RemoveContainer" containerID="f682d6ba2b3a76e5b33560b16ccf1b9209e6341d4926ea2e556ef1f28310d774" Nov 25 11:08:38 crc kubenswrapper[4769]: E1125 11:08:38.996630 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=metallb-operator-controller-manager-64569bb78d-pzqdq_metallb-system(9441dbc7-716c-413e-b0ea-bf1ef05b1608)\"" pod="metallb-system/metallb-operator-controller-manager-64569bb78d-pzqdq" podUID="9441dbc7-716c-413e-b0ea-bf1ef05b1608" Nov 25 11:08:39 crc kubenswrapper[4769]: I1125 11:08:39.015747 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 25 11:08:39 crc kubenswrapper[4769]: I1125 11:08:39.070597 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 25 11:08:39 crc kubenswrapper[4769]: I1125 11:08:39.196394 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 25 11:08:39 crc kubenswrapper[4769]: I1125 11:08:39.254715 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 25 11:08:39 crc kubenswrapper[4769]: I1125 11:08:39.379637 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-state-metrics-kube-rbac-proxy-config" Nov 25 11:08:39 crc kubenswrapper[4769]: I1125 11:08:39.445219 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-pxvh7" Nov 25 11:08:39 crc kubenswrapper[4769]: I1125 11:08:39.561917 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 25 11:08:39 crc kubenswrapper[4769]: I1125 11:08:39.652457 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Nov 25 11:08:39 crc kubenswrapper[4769]: I1125 11:08:39.754396 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Nov 25 11:08:39 crc kubenswrapper[4769]: I1125 11:08:39.824647 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/openstack-operator-controller-operator-7fb4d7987d-w6ztr" Nov 25 11:08:39 crc kubenswrapper[4769]: I1125 11:08:39.865327 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-http" Nov 25 11:08:39 crc kubenswrapper[4769]: I1125 11:08:39.925426 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 25 11:08:39 crc kubenswrapper[4769]: I1125 11:08:39.934914 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Nov 25 11:08:40 crc kubenswrapper[4769]: I1125 11:08:40.007157 4769 generic.go:334] "Generic (PLEG): container finished" podID="f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9" containerID="8ee70c6e5c00eca7899e4fcf8a105e2ff96534ff2ec995e3b08b51fc6d6c9920" exitCode=1 Nov 25 11:08:40 crc kubenswrapper[4769]: I1125 11:08:40.007201 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9","Type":"ContainerDied","Data":"8ee70c6e5c00eca7899e4fcf8a105e2ff96534ff2ec995e3b08b51fc6d6c9920"} Nov 25 11:08:40 crc kubenswrapper[4769]: I1125 11:08:40.007241 4769 scope.go:117] "RemoveContainer" containerID="1d5b649be099053b04184140764d8cafa45a2baf51ef02bcd7e92be816615864" Nov 25 11:08:40 crc kubenswrapper[4769]: I1125 11:08:40.008111 4769 scope.go:117] "RemoveContainer" containerID="8ee70c6e5c00eca7899e4fcf8a105e2ff96534ff2ec995e3b08b51fc6d6c9920" Nov 25 11:08:40 crc kubenswrapper[4769]: E1125 11:08:40.008515 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-state-metrics pod=kube-state-metrics-0_openstack(f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9)\"" pod="openstack/kube-state-metrics-0" podUID="f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9" Nov 25 11:08:40 crc kubenswrapper[4769]: I1125 11:08:40.053922 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-s3" Nov 25 11:08:40 crc kubenswrapper[4769]: I1125 11:08:40.156342 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 25 11:08:40 crc kubenswrapper[4769]: I1125 11:08:40.253396 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/openstack-operator-controller-manager-6c6f9bd7cc-cp48g" Nov 25 11:08:40 crc kubenswrapper[4769]: I1125 11:08:40.254338 4769 scope.go:117] "RemoveContainer" containerID="bc469b6674d8415450a3a7ac3d780f092d9fec2269ea54e1291e18eff7c1f0b4" Nov 25 11:08:40 crc kubenswrapper[4769]: E1125 11:08:40.254621 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=openstack-operator-controller-manager-6c6f9bd7cc-cp48g_openstack-operators(70f81d0a-db58-4bd4-a0e2-ee1c03e2f923)\"" pod="openstack-operators/openstack-operator-controller-manager-6c6f9bd7cc-cp48g" podUID="70f81d0a-db58-4bd4-a0e2-ee1c03e2f923" Nov 25 11:08:40 crc kubenswrapper[4769]: I1125 11:08:40.264101 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Nov 25 11:08:40 crc kubenswrapper[4769]: I1125 11:08:40.312067 4769 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 25 11:08:40 crc kubenswrapper[4769]: I1125 11:08:40.312548 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 25 11:08:40 crc kubenswrapper[4769]: I1125 11:08:40.362426 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-b7bcg" Nov 25 11:08:40 crc kubenswrapper[4769]: I1125 11:08:40.373003 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 25 11:08:40 crc kubenswrapper[4769]: I1125 11:08:40.446365 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-ui-dashboards-sa-dockercfg-9v6jl" Nov 25 11:08:40 crc kubenswrapper[4769]: I1125 11:08:40.612200 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway" Nov 25 11:08:40 crc kubenswrapper[4769]: I1125 11:08:40.622701 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-internal-svc" Nov 25 11:08:40 crc kubenswrapper[4769]: I1125 11:08:40.638222 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 11:08:40 crc kubenswrapper[4769]: I1125 11:08:40.762726 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Nov 25 11:08:40 crc kubenswrapper[4769]: I1125 11:08:40.826472 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 25 11:08:40 crc kubenswrapper[4769]: I1125 11:08:40.833232 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-6z7lp" Nov 25 11:08:40 crc kubenswrapper[4769]: I1125 11:08:40.842124 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 25 11:08:40 crc kubenswrapper[4769]: I1125 11:08:40.913754 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 25 11:08:40 crc kubenswrapper[4769]: I1125 11:08:40.943098 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 25 11:08:40 crc kubenswrapper[4769]: I1125 11:08:40.992005 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-tls" Nov 25 11:08:41 crc kubenswrapper[4769]: I1125 11:08:41.010488 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Nov 25 11:08:41 crc kubenswrapper[4769]: I1125 11:08:41.022424 4769 scope.go:117] "RemoveContainer" containerID="8ee70c6e5c00eca7899e4fcf8a105e2ff96534ff2ec995e3b08b51fc6d6c9920" Nov 25 11:08:41 crc kubenswrapper[4769]: E1125 11:08:41.022982 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-state-metrics pod=kube-state-metrics-0_openstack(f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9)\"" pod="openstack/kube-state-metrics-0" podUID="f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9" Nov 25 11:08:41 crc kubenswrapper[4769]: I1125 11:08:41.035830 4769 reflector.go:368] Caches populated for *v1.Secret from 
object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 25 11:08:41 crc kubenswrapper[4769]: I1125 11:08:41.101869 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-thanos-prometheus-http-client-file" Nov 25 11:08:41 crc kubenswrapper[4769]: I1125 11:08:41.269113 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"openshift-state-metrics-dockercfg-7gfdb" Nov 25 11:08:41 crc kubenswrapper[4769]: I1125 11:08:41.270908 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-kube-rbac-proxy-web" Nov 25 11:08:41 crc kubenswrapper[4769]: I1125 11:08:41.306541 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 25 11:08:41 crc kubenswrapper[4769]: I1125 11:08:41.327634 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 25 11:08:41 crc kubenswrapper[4769]: I1125 11:08:41.379949 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-dockercfg-kz8wz" Nov 25 11:08:41 crc kubenswrapper[4769]: I1125 11:08:41.392742 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 25 11:08:41 crc kubenswrapper[4769]: I1125 11:08:41.397518 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 25 11:08:41 crc kubenswrapper[4769]: I1125 11:08:41.409303 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-rgbvj" Nov 25 11:08:41 crc kubenswrapper[4769]: I1125 11:08:41.456100 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-gateway" Nov 25 11:08:41 crc kubenswrapper[4769]: I1125 11:08:41.458136 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Nov 25 11:08:41 crc kubenswrapper[4769]: I1125 11:08:41.467197 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"alertmanager-trusted-ca-bundle" Nov 25 11:08:41 crc kubenswrapper[4769]: I1125 11:08:41.523231 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Nov 25 11:08:41 crc kubenswrapper[4769]: I1125 11:08:41.615548 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 25 11:08:41 crc kubenswrapper[4769]: I1125 11:08:41.709943 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-dockercfg-qhw9n" Nov 25 11:08:41 crc kubenswrapper[4769]: I1125 11:08:41.724980 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 25 11:08:41 crc kubenswrapper[4769]: I1125 11:08:41.735298 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 25 11:08:41 crc kubenswrapper[4769]: I1125 11:08:41.753326 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 25 11:08:41 crc kubenswrapper[4769]: I1125 11:08:41.763196 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" 
Nov 25 11:08:41 crc kubenswrapper[4769]: I1125 11:08:41.803461 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"cluster-monitoring-operator-dockercfg-wwt9l" Nov 25 11:08:41 crc kubenswrapper[4769]: I1125 11:08:41.810652 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-web-config" Nov 25 11:08:41 crc kubenswrapper[4769]: I1125 11:08:41.854546 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Nov 25 11:08:41 crc kubenswrapper[4769]: I1125 11:08:41.877327 4769 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Nov 25 11:08:41 crc kubenswrapper[4769]: I1125 11:08:41.880881 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 25 11:08:41 crc kubenswrapper[4769]: I1125 11:08:41.892710 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 25 11:08:41 crc kubenswrapper[4769]: I1125 11:08:41.930781 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 25 11:08:42 crc kubenswrapper[4769]: I1125 11:08:42.025660 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-kpnqf" Nov 25 11:08:42 crc kubenswrapper[4769]: I1125 11:08:42.046126 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 25 11:08:42 crc kubenswrapper[4769]: I1125 11:08:42.052356 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 25 11:08:42 crc kubenswrapper[4769]: I1125 11:08:42.075174 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-fjp2f" Nov 25 11:08:42 crc kubenswrapper[4769]: I1125 11:08:42.076845 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Nov 25 11:08:42 crc kubenswrapper[4769]: I1125 11:08:42.098830 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"minio-dev"/"openshift-service-ca.crt" Nov 25 11:08:42 crc kubenswrapper[4769]: I1125 11:08:42.106978 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 25 11:08:42 crc kubenswrapper[4769]: I1125 11:08:42.107419 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Nov 25 11:08:42 crc kubenswrapper[4769]: I1125 11:08:42.124945 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 25 11:08:42 crc kubenswrapper[4769]: I1125 11:08:42.157194 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-k46pn" Nov 25 11:08:42 crc kubenswrapper[4769]: I1125 11:08:42.162451 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Nov 25 11:08:42 crc kubenswrapper[4769]: I1125 11:08:42.175164 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Nov 25 11:08:42 crc kubenswrapper[4769]: I1125 11:08:42.195884 4769 reflector.go:368] Caches 
populated for *v1.Secret from object-"openshift-monitoring"/"node-exporter-tls" Nov 25 11:08:42 crc kubenswrapper[4769]: I1125 11:08:42.215492 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-kube-rbac-proxy-web" Nov 25 11:08:42 crc kubenswrapper[4769]: I1125 11:08:42.219063 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 25 11:08:42 crc kubenswrapper[4769]: I1125 11:08:42.318401 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 25 11:08:42 crc kubenswrapper[4769]: I1125 11:08:42.322163 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-public-svc" Nov 25 11:08:42 crc kubenswrapper[4769]: I1125 11:08:42.351246 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Nov 25 11:08:42 crc kubenswrapper[4769]: I1125 11:08:42.381760 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-distributor-grpc" Nov 25 11:08:42 crc kubenswrapper[4769]: I1125 11:08:42.445365 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 25 11:08:42 crc kubenswrapper[4769]: I1125 11:08:42.473030 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Nov 25 11:08:42 crc kubenswrapper[4769]: I1125 11:08:42.485809 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"openshift-service-ca.crt" Nov 25 11:08:42 crc kubenswrapper[4769]: I1125 11:08:42.497339 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-cvdml" Nov 25 11:08:42 crc kubenswrapper[4769]: I1125 11:08:42.498206 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 25 11:08:42 crc kubenswrapper[4769]: I1125 11:08:42.527816 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 25 11:08:42 crc kubenswrapper[4769]: I1125 11:08:42.546441 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 25 11:08:42 crc kubenswrapper[4769]: I1125 11:08:42.626204 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 25 11:08:42 crc kubenswrapper[4769]: I1125 11:08:42.664800 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 25 11:08:42 crc kubenswrapper[4769]: I1125 11:08:42.679865 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 25 11:08:42 crc kubenswrapper[4769]: I1125 11:08:42.694892 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 25 11:08:42 crc kubenswrapper[4769]: I1125 11:08:42.767903 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 25 11:08:42 crc kubenswrapper[4769]: I1125 11:08:42.803404 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 25 11:08:42 
crc kubenswrapper[4769]: I1125 11:08:42.885073 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-metrics" Nov 25 11:08:42 crc kubenswrapper[4769]: I1125 11:08:42.958914 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 25 11:08:42 crc kubenswrapper[4769]: I1125 11:08:42.973640 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Nov 25 11:08:42 crc kubenswrapper[4769]: I1125 11:08:42.986449 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-dockercfg-54tjz" Nov 25 11:08:42 crc kubenswrapper[4769]: I1125 11:08:42.992431 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Nov 25 11:08:43 crc kubenswrapper[4769]: I1125 11:08:43.005709 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 25 11:08:43 crc kubenswrapper[4769]: I1125 11:08:43.007794 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 25 11:08:43 crc kubenswrapper[4769]: I1125 11:08:43.037305 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 25 11:08:43 crc kubenswrapper[4769]: I1125 11:08:43.052657 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 25 11:08:43 crc kubenswrapper[4769]: I1125 11:08:43.063611 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 25 11:08:43 crc kubenswrapper[4769]: I1125 11:08:43.121512 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 25 11:08:43 crc kubenswrapper[4769]: I1125 11:08:43.174947 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-5nnz9" Nov 25 11:08:43 crc kubenswrapper[4769]: I1125 11:08:43.188137 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 25 11:08:43 crc kubenswrapper[4769]: I1125 11:08:43.212710 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 25 11:08:43 crc kubenswrapper[4769]: I1125 11:08:43.217469 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"kubelet-serving-ca-bundle" Nov 25 11:08:43 crc kubenswrapper[4769]: I1125 11:08:43.226074 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 25 11:08:43 crc kubenswrapper[4769]: I1125 11:08:43.249810 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-pldh2" Nov 25 11:08:43 crc kubenswrapper[4769]: I1125 11:08:43.250338 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 25 11:08:43 crc kubenswrapper[4769]: I1125 11:08:43.261640 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 25 11:08:43 crc 
kubenswrapper[4769]: I1125 11:08:43.288124 4769 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 25 11:08:43 crc kubenswrapper[4769]: I1125 11:08:43.290015 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-4zjb9" Nov 25 11:08:43 crc kubenswrapper[4769]: I1125 11:08:43.295425 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Nov 25 11:08:43 crc kubenswrapper[4769]: I1125 11:08:43.310327 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Nov 25 11:08:43 crc kubenswrapper[4769]: I1125 11:08:43.313458 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 25 11:08:43 crc kubenswrapper[4769]: I1125 11:08:43.349421 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 25 11:08:43 crc kubenswrapper[4769]: I1125 11:08:43.389360 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 25 11:08:43 crc kubenswrapper[4769]: I1125 11:08:43.425456 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 25 11:08:43 crc kubenswrapper[4769]: I1125 11:08:43.430153 4769 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Nov 25 11:08:43 crc kubenswrapper[4769]: I1125 11:08:43.430625 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-syslog-receiver" Nov 25 11:08:43 crc kubenswrapper[4769]: I1125 11:08:43.451673 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 25 11:08:43 crc kubenswrapper[4769]: I1125 11:08:43.477541 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 25 11:08:43 crc kubenswrapper[4769]: I1125 11:08:43.496169 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 25 11:08:43 crc kubenswrapper[4769]: I1125 11:08:43.496777 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-d89x5" Nov 25 11:08:43 crc kubenswrapper[4769]: I1125 11:08:43.550136 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"prometheus-k8s-rulefiles-0" Nov 25 11:08:43 crc kubenswrapper[4769]: I1125 11:08:43.552795 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 25 11:08:43 crc kubenswrapper[4769]: I1125 11:08:43.584680 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 25 11:08:43 crc kubenswrapper[4769]: I1125 11:08:43.600452 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Nov 25 11:08:43 crc kubenswrapper[4769]: I1125 11:08:43.611779 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Nov 25 11:08:43 crc kubenswrapper[4769]: I1125 11:08:43.663837 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 25 11:08:43 
crc kubenswrapper[4769]: I1125 11:08:43.675946 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-generated"
Nov 25 11:08:43 crc kubenswrapper[4769]: I1125 11:08:43.730869 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-mp9wj"
Nov 25 11:08:43 crc kubenswrapper[4769]: I1125 11:08:43.743050 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"monitoring-plugin-cert"
Nov 25 11:08:43 crc kubenswrapper[4769]: I1125 11:08:43.836593 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs"
Nov 25 11:08:43 crc kubenswrapper[4769]: I1125 11:08:43.839029 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config"
Nov 25 11:08:43 crc kubenswrapper[4769]: I1125 11:08:43.847500 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-pdvwp"
Nov 25 11:08:43 crc kubenswrapper[4769]: I1125 11:08:43.931207 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv"
Nov 25 11:08:43 crc kubenswrapper[4769]: I1125 11:08:43.980058 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-admission-webhook-tls"
Nov 25 11:08:44 crc kubenswrapper[4769]: I1125 11:08:44.002397 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-79t9w"
Nov 25 11:08:44 crc kubenswrapper[4769]: I1125 11:08:44.007428 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc"
Nov 25 11:08:44 crc kubenswrapper[4769]: I1125 11:08:44.160805 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-compactor-http"
Nov 25 11:08:44 crc kubenswrapper[4769]: I1125 11:08:44.163224 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca"
Nov 25 11:08:44 crc kubenswrapper[4769]: I1125 11:08:44.165970 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-dnsfl"
Nov 25 11:08:44 crc kubenswrapper[4769]: I1125 11:08:44.176349 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts"
Nov 25 11:08:44 crc kubenswrapper[4769]: I1125 11:08:44.194266 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt"
Nov 25 11:08:44 crc kubenswrapper[4769]: I1125 11:08:44.195791 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls"
Nov 25 11:08:44 crc kubenswrapper[4769]: I1125 11:08:44.244113 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-querier-http"
Nov 25 11:08:44 crc kubenswrapper[4769]: I1125 11:08:44.291083 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts"
Nov 25 11:08:44 crc kubenswrapper[4769]: I1125 11:08:44.403055 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-trustbundle"
Nov 25 11:08:44 crc kubenswrapper[4769]: I1125 11:08:44.444353 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data"
Nov 25 11:08:44 crc kubenswrapper[4769]: I1125 11:08:44.513576 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Nov 25 11:08:44 crc kubenswrapper[4769]: I1125 11:08:44.539067 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-vc5tq"
Nov 25 11:08:44 crc kubenswrapper[4769]: I1125 11:08:44.564401 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts"
Nov 25 11:08:44 crc kubenswrapper[4769]: I1125 11:08:44.599939 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"openshift-state-metrics-kube-rbac-proxy-config"
Nov 25 11:08:44 crc kubenswrapper[4769]: I1125 11:08:44.610097 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-khqxn"
Nov 25 11:08:44 crc kubenswrapper[4769]: I1125 11:08:44.611866 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0"
Nov 25 11:08:44 crc kubenswrapper[4769]: I1125 11:08:44.624199 4769 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Nov 25 11:08:44 crc kubenswrapper[4769]: I1125 11:08:44.651844 4769 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-975vr"
Nov 25 11:08:44 crc kubenswrapper[4769]: I1125 11:08:44.666347 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-zlwdz"
Nov 25 11:08:44 crc kubenswrapper[4769]: I1125 11:08:44.707400 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default"
Nov 25 11:08:44 crc kubenswrapper[4769]: I1125 11:08:44.722076 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf"
Nov 25 11:08:44 crc kubenswrapper[4769]: I1125 11:08:44.780477 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist"
Nov 25 11:08:44 crc kubenswrapper[4769]: I1125 11:08:44.782426 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt"
Nov 25 11:08:44 crc kubenswrapper[4769]: I1125 11:08:44.894263 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data"
Nov 25 11:08:44 crc kubenswrapper[4769]: I1125 11:08:44.936821 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config"
Nov 25 11:08:44 crc kubenswrapper[4769]: I1125 11:08:44.949622 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls"
Nov 25 11:08:44 crc kubenswrapper[4769]: I1125 11:08:44.958561 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt"
Nov 25 11:08:44 crc kubenswrapper[4769]: I1125 11:08:44.973871 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"node-exporter-dockercfg-78b9k"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.011353 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.039355 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.046890 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.065583 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.075930 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.095957 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.104206 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-9tnjw"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.168366 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.189066 4769 reflector.go:368] Caches populated for *v1.Secret from object-"minio-dev"/"default-dockercfg-vn64l"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.223341 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.227044 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.230759 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.236898 4769 scope.go:117] "RemoveContainer" containerID="cd7b3e66133ef9e6cf910abf987e916bbca2ff088f170e7afdf537fa37db392e"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.237672 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.239870 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.262501 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.292829 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.336793 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.362448 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.384322 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"loki-operator-manager-config"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.393020 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-xngcz"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.407168 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-state-metrics-tls"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.433874 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.442461 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.498486 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-tls"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.498540 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.534752 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.537920 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.569893 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-client-http"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.576141 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.599672 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.606597 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.641840 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-kube-rbac-proxy-config"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.660834 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-tls-assets-0"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.664215 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.668017 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/kube-state-metrics-0"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.668341 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.668923 4769 scope.go:117] "RemoveContainer" containerID="8ee70c6e5c00eca7899e4fcf8a105e2ff96534ff2ec995e3b08b51fc6d6c9920"
Nov 25 11:08:45 crc kubenswrapper[4769]: E1125 11:08:45.669317 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-state-metrics pod=kube-state-metrics-0_openstack(f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9)\"" pod="openstack/kube-state-metrics-0" podUID="f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.671054 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-tls-assets-0"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.685273 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"serving-certs-ca-bundle"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.720098 4769 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.782032 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s0"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.784757 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-cjscj"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.791104 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.803978 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.850817 4769 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.867828 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-6n7lc"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.883933 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.884170 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.926645 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-px795"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.985264 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file"
Nov 25 11:08:45 crc kubenswrapper[4769]: I1125 11:08:45.986669 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Nov 25 11:08:46 crc kubenswrapper[4769]: I1125 11:08:46.009704 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-ingester-grpc"
Nov 25 11:08:46 crc kubenswrapper[4769]: I1125 11:08:46.066047 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-c4qrf"
Nov 25 11:08:46 crc kubenswrapper[4769]: I1125 11:08:46.077270 4769 generic.go:334] "Generic (PLEG): container finished" podID="a032414e-4be2-47f7-ac88-3bdec0ccb151" containerID="f085586e097d99e6a0130457fdecb9ff7a0569323a0150618c6a97dc4980df25" exitCode=1
Nov 25 11:08:46 crc kubenswrapper[4769]: I1125 11:08:46.077324 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-bzrbr" event={"ID":"a032414e-4be2-47f7-ac88-3bdec0ccb151","Type":"ContainerDied","Data":"f085586e097d99e6a0130457fdecb9ff7a0569323a0150618c6a97dc4980df25"}
Nov 25 11:08:46 crc kubenswrapper[4769]: I1125 11:08:46.077394 4769 scope.go:117] "RemoveContainer" containerID="cd7b3e66133ef9e6cf910abf987e916bbca2ff088f170e7afdf537fa37db392e"
Nov 25 11:08:46 crc kubenswrapper[4769]: I1125 11:08:46.078046 4769 scope.go:117] "RemoveContainer" containerID="8ee70c6e5c00eca7899e4fcf8a105e2ff96534ff2ec995e3b08b51fc6d6c9920"
Nov 25 11:08:46 crc kubenswrapper[4769]: E1125 11:08:46.078366 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-state-metrics pod=kube-state-metrics-0_openstack(f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9)\"" pod="openstack/kube-state-metrics-0" podUID="f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9"
Nov 25 11:08:46 crc kubenswrapper[4769]: I1125 11:08:46.078711 4769 scope.go:117] "RemoveContainer" containerID="f085586e097d99e6a0130457fdecb9ff7a0569323a0150618c6a97dc4980df25"
Nov 25 11:08:46 crc kubenswrapper[4769]: E1125 11:08:46.079105 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=operator pod=rabbitmq-cluster-operator-manager-668c99d594-bzrbr_openstack-operators(a032414e-4be2-47f7-ac88-3bdec0ccb151)\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-bzrbr" podUID="a032414e-4be2-47f7-ac88-3bdec0ccb151"
Nov 25 11:08:46 crc kubenswrapper[4769]: I1125 11:08:46.087226 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Nov 25 11:08:46 crc kubenswrapper[4769]: I1125 11:08:46.096583 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Nov 25 11:08:46 crc kubenswrapper[4769]: I1125 11:08:46.197103 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-engine-config-data"
Nov 25 11:08:46 crc kubenswrapper[4769]: I1125 11:08:46.227442 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1"
Nov 25 11:08:46 crc kubenswrapper[4769]: I1125 11:08:46.237035 4769 scope.go:117] "RemoveContainer" containerID="93feba38754e7e7cf6fd7ce94bfbe24770aa18f243ce1c1b5ddd2f51902c5ec9"
Nov 25 11:08:46 crc kubenswrapper[4769]: I1125 11:08:46.238186 4769 scope.go:117] "RemoveContainer" containerID="74e51c5514768f63cf30779a9a9ff75255ce78baf6763035fc39525e117305ba"
Nov 25 11:08:46 crc kubenswrapper[4769]: I1125 11:08:46.297273 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt"
Nov 25 11:08:46 crc kubenswrapper[4769]: I1125 11:08:46.325377 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt"
Nov 25 11:08:46 crc kubenswrapper[4769]: I1125 11:08:46.431838 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg"
Nov 25 11:08:46 crc kubenswrapper[4769]: I1125 11:08:46.431894 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template"
Nov 25 11:08:46 crc kubenswrapper[4769]: I1125 11:08:46.462847 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-web-config"
Nov 25 11:08:46 crc kubenswrapper[4769]: I1125 11:08:46.463583 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-j5zw9"
Nov 25 11:08:46 crc kubenswrapper[4769]: I1125 11:08:46.466048 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt"
Nov 25 11:08:46 crc kubenswrapper[4769]: I1125 11:08:46.496585 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt"
Nov 25 11:08:46 crc kubenswrapper[4769]: I1125 11:08:46.505502 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc"
Nov 25 11:08:46 crc kubenswrapper[4769]: I1125 11:08:46.558876 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-bnc4t"
Nov 25 11:08:46 crc kubenswrapper[4769]: I1125 11:08:46.563153 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls"
Nov 25 11:08:46 crc kubenswrapper[4769]: I1125 11:08:46.569504 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret"
Nov 25 11:08:46 crc kubenswrapper[4769]: I1125 11:08:46.582155 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt"
Nov 25 11:08:46 crc kubenswrapper[4769]: I1125 11:08:46.590450 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-zhplb"
Nov 25 11:08:46 crc kubenswrapper[4769]: I1125 11:08:46.593239 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx"
Nov 25 11:08:46 crc kubenswrapper[4769]: I1125 11:08:46.647224 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-metric-storage-prometheus-svc"
Nov 25 11:08:46 crc kubenswrapper[4769]: I1125 11:08:46.684652 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-zhjgb"
Nov 25 11:08:46 crc kubenswrapper[4769]: I1125 11:08:46.719229 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt"
Nov 25 11:08:46 crc kubenswrapper[4769]: I1125 11:08:46.768766 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data"
Nov 25 11:08:46 crc kubenswrapper[4769]: I1125 11:08:46.834719 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-kube-rbac-proxy"
Nov 25 11:08:46 crc kubenswrapper[4769]: I1125 11:08:46.889247 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc"
Nov 25 11:08:46 crc kubenswrapper[4769]: I1125 11:08:46.893231 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Nov 25 11:08:46 crc kubenswrapper[4769]: I1125 11:08:46.930370 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts"
Nov 25 11:08:46 crc kubenswrapper[4769]: I1125 11:08:46.952979 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"openshift-service-ca.crt"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.014977 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.021388 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.026458 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-l4lh7"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.041939 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.044756 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.057748 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.073609 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-query-frontend-http"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.080317 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.081222 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.083869 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"telemetry-config"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.089842 4769 generic.go:334] "Generic (PLEG): container finished" podID="a2f1ad69-27e4-4131-a742-a8d2c5df8636" containerID="ade37617c9c8ca458df14d6683e467d37c8e17a1a4f94a5b4006aa7618e78c7b" exitCode=1
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.089922 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-68bvf" event={"ID":"a2f1ad69-27e4-4131-a742-a8d2c5df8636","Type":"ContainerDied","Data":"ade37617c9c8ca458df14d6683e467d37c8e17a1a4f94a5b4006aa7618e78c7b"}
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.089999 4769 scope.go:117] "RemoveContainer" containerID="74e51c5514768f63cf30779a9a9ff75255ce78baf6763035fc39525e117305ba"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.090763 4769 scope.go:117] "RemoveContainer" containerID="ade37617c9c8ca458df14d6683e467d37c8e17a1a4f94a5b4006aa7618e78c7b"
Nov 25 11:08:47 crc kubenswrapper[4769]: E1125 11:08:47.091215 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=ovn-operator-controller-manager-66cf5c67ff-68bvf_openstack-operators(a2f1ad69-27e4-4131-a742-a8d2c5df8636)\"" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-68bvf" podUID="a2f1ad69-27e4-4131-a742-a8d2c5df8636"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.092493 4769 generic.go:334] "Generic (PLEG): container finished" podID="6d9be953-34ea-4956-96bf-84d5f8babb2d" containerID="192b50f0ff9ad2e8a1a9e7106cbcdd244285e3bd8f176702a77627bfa9a67045" exitCode=1
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.092573 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-rgn2z" event={"ID":"6d9be953-34ea-4956-96bf-84d5f8babb2d","Type":"ContainerDied","Data":"192b50f0ff9ad2e8a1a9e7106cbcdd244285e3bd8f176702a77627bfa9a67045"}
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.093776 4769 scope.go:117] "RemoveContainer" containerID="192b50f0ff9ad2e8a1a9e7106cbcdd244285e3bd8f176702a77627bfa9a67045"
Nov 25 11:08:47 crc kubenswrapper[4769]: E1125 11:08:47.094188 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=neutron-operator-controller-manager-7c57c8bbc4-rgn2z_openstack-operators(6d9be953-34ea-4956-96bf-84d5f8babb2d)\"" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-rgn2z" podUID="6d9be953-34ea-4956-96bf-84d5f8babb2d"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.100215 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.126064 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.133747 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.150629 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.150747 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.164618 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.170706 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.180346 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.190440 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.224237 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.236709 4769 scope.go:117] "RemoveContainer" containerID="935b97555d27421b14befc4592ae226765148116fac79ea0f3e09138aa321842"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.236744 4769 scope.go:117] "RemoveContainer" containerID="4c5e7e950f6538b39e9f699e1a741178dc60284160600dbf7845ddbf614e82cd"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.238087 4769 scope.go:117] "RemoveContainer" containerID="6846c874242d9e6c04dcab0a1fe89dd686acb8a1f3b9e35b6e6e4a9c1b5f5618"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.248823 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-wx5bm"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.256438 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.270296 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.301183 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-admission-webhook-dockercfg-7j7cf"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.304890 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"cluster-monitoring-operator-tls"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.326438 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.334561 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.335173 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-controller-manager-dockercfg-wdv86"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.411189 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-controller-manager-service-cert"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.422282 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-rbac-proxy"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.431304 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.437841 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.460579 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.467221 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.478770 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"kube-root-ca.crt"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.507022 4769 scope.go:117] "RemoveContainer" containerID="93feba38754e7e7cf6fd7ce94bfbe24770aa18f243ce1c1b5ddd2f51902c5ec9"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.517765 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.553445 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.553634 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.562945 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.590433 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-server-8l6e4i4iqssqo"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.592617 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-querier-grpc"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.596362 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-6l42b"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.607838 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.616797 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.645602 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.664099 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.664359 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.683506 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.704780 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.723778 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.748524 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-query-frontend-grpc"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.753740 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.769810 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.785111 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.791183 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.806104 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.807511 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-sc4ww"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.818853 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-sw897"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.821746 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.838187 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-compactor-grpc"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.925937 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.927883 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.963315 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"minio-dev"/"kube-root-ca.crt"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.981575 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca"
Nov 25 11:08:47 crc kubenswrapper[4769]: I1125 11:08:47.986361 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.007356 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.020566 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.039450 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-kube-rbac-proxy-metric"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.073754 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.111117 4769 generic.go:334] "Generic (PLEG): container finished" podID="67abea47-5e8a-43a2-8865-929cfdfc607c" containerID="7c3380e5b07d625f1dcad94641ead927af8109d9dcb2f314fb8dd4154820908b" exitCode=1
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.111208 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-774b86978c-w8ftq" event={"ID":"67abea47-5e8a-43a2-8865-929cfdfc607c","Type":"ContainerDied","Data":"7c3380e5b07d625f1dcad94641ead927af8109d9dcb2f314fb8dd4154820908b"}
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.111280 4769 scope.go:117] "RemoveContainer" containerID="6846c874242d9e6c04dcab0a1fe89dd686acb8a1f3b9e35b6e6e4a9c1b5f5618"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.111651 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.112322 4769 scope.go:117] "RemoveContainer" containerID="7c3380e5b07d625f1dcad94641ead927af8109d9dcb2f314fb8dd4154820908b"
Nov 25 11:08:48 crc kubenswrapper[4769]: E1125 11:08:48.113100 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=heat-operator-controller-manager-774b86978c-w8ftq_openstack-operators(67abea47-5e8a-43a2-8865-929cfdfc607c)\"" pod="openstack-operators/heat-operator-controller-manager-774b86978c-w8ftq" podUID="67abea47-5e8a-43a2-8865-929cfdfc607c"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.122279 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.132023 4769 generic.go:334] "Generic (PLEG): container finished" podID="1b8cd25d-43dd-4774-b1d9-59572bb6bef7" containerID="3ec051fc18a0ed5dd205e14155e6b88e962ed8ef7f45b098b1083440ef362933" exitCode=1
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.132095 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rcv4n" event={"ID":"1b8cd25d-43dd-4774-b1d9-59572bb6bef7","Type":"ContainerDied","Data":"3ec051fc18a0ed5dd205e14155e6b88e962ed8ef7f45b098b1083440ef362933"}
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.133050 4769 scope.go:117] "RemoveContainer" containerID="3ec051fc18a0ed5dd205e14155e6b88e962ed8ef7f45b098b1083440ef362933"
Nov 25 11:08:48 crc kubenswrapper[4769]: E1125 11:08:48.134106 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=nova-operator-controller-manager-79556f57fc-rcv4n_openstack-operators(1b8cd25d-43dd-4774-b1d9-59572bb6bef7)\"" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rcv4n" podUID="1b8cd25d-43dd-4774-b1d9-59572bb6bef7"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.142576 4769 generic.go:334] "Generic (PLEG): container finished" podID="1be22f03-8697-413b-922c-9344185c05c4" containerID="334e5a38c2dddecc1080d89fdb35cd17e4414bb3157ba3bd0dfff520a809643a" exitCode=1
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.142611 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-g8xqz" event={"ID":"1be22f03-8697-413b-922c-9344185c05c4","Type":"ContainerDied","Data":"334e5a38c2dddecc1080d89fdb35cd17e4414bb3157ba3bd0dfff520a809643a"}
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.143876 4769 scope.go:117] "RemoveContainer" containerID="334e5a38c2dddecc1080d89fdb35cd17e4414bb3157ba3bd0dfff520a809643a"
Nov 25 11:08:48 crc kubenswrapper[4769]: E1125 11:08:48.144607 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=placement-operator-controller-manager-5db546f9d9-g8xqz_openstack-operators(1be22f03-8697-413b-922c-9344185c05c4)\"" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-g8xqz" podUID="1be22f03-8697-413b-922c-9344185c05c4"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.155569 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.157431 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-5jpzm"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.175531 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.189516 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-state-metrics-dockercfg-592h4"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.214476 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.234500 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.241769 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-vjwbl"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.243064 4769 scope.go:117] "RemoveContainer" containerID="8154a6c938b49fa33eff62181bdd315b08ce8beb4a1fe0b024ac73bf728240ee"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.244120 4769 scope.go:117] "RemoveContainer" containerID="4c5e7e950f6538b39e9f699e1a741178dc60284160600dbf7845ddbf614e82cd"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.245422 4769 scope.go:117] "RemoveContainer" containerID="39d9302b3639f4ddffeaa35d2e8fc70b20f5c0cc9b12bbd32022210155aaad67"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.246197 4769 scope.go:117] "RemoveContainer" containerID="80a8ac840c40863e509142f82abbd1eac0a87057401260cd1cf0950115419adf"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.260953 4769 scope.go:117] "RemoveContainer" containerID="6d68c8b35baf981f2d94ddadea913524158f563f9d15ef8038e939ee85cb6bc5"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.316356 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.331478 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-dockercfg-h8hwm"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.354886 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.364769 4769 scope.go:117] "RemoveContainer" containerID="935b97555d27421b14befc4592ae226765148116fac79ea0f3e09138aa321842"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.406168 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.433007 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.452162 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.460601 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-7qxns"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.517930 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.535117 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.558834 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.709274 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-metrics"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.711010 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.733802 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-rules"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.744356 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"node-exporter-kube-rbac-proxy-config"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.797327 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.830419 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.847640 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-2dhrc"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.850063 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.918296 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.937802 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.950819 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-tls"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.961176 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.987240 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-server-tls"
Nov 25 11:08:48 crc kubenswrapper[4769]: I1125 11:08:48.994212 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.019135 4769 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.045912 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.054279 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-index-gateway-grpc"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.093692 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.171593 4769 generic.go:334] "Generic (PLEG): container finished" podID="63e66921-47a7-407a-b50e-06cf5cadb8be" containerID="bdca6b56630d0dc81232cbaf70ba4fb1dda827a4b73883e5cce0ce5cb4a5a6e4" exitCode=1
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.171669 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-qdjsd" event={"ID":"63e66921-47a7-407a-b50e-06cf5cadb8be","Type":"ContainerDied","Data":"bdca6b56630d0dc81232cbaf70ba4fb1dda827a4b73883e5cce0ce5cb4a5a6e4"}
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.171703 4769 scope.go:117] "RemoveContainer" containerID="39d9302b3639f4ddffeaa35d2e8fc70b20f5c0cc9b12bbd32022210155aaad67"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.172420 4769 scope.go:117] "RemoveContainer" containerID="bdca6b56630d0dc81232cbaf70ba4fb1dda827a4b73883e5cce0ce5cb4a5a6e4"
Nov 25 11:08:49 crc kubenswrapper[4769]: E1125 11:08:49.172813 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=manila-operator-controller-manager-58bb8d67cc-qdjsd_openstack-operators(63e66921-47a7-407a-b50e-06cf5cadb8be)\"" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-qdjsd" podUID="63e66921-47a7-407a-b50e-06cf5cadb8be"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.196492 4769 generic.go:334] "Generic (PLEG): container finished" podID="9d0ef7c9-7421-4fe2-b1c8-551253bea174" containerID="836ee524fe136e22b1a4cdee89d7bdc2bac71512f4048a2178fe46c04b9cb5b5" exitCode=1
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.196585 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-fknhz" event={"ID":"9d0ef7c9-7421-4fe2-b1c8-551253bea174","Type":"ContainerDied","Data":"836ee524fe136e22b1a4cdee89d7bdc2bac71512f4048a2178fe46c04b9cb5b5"}
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.198229 4769 scope.go:117] "RemoveContainer" containerID="836ee524fe136e22b1a4cdee89d7bdc2bac71512f4048a2178fe46c04b9cb5b5"
Nov 25 11:08:49 crc kubenswrapper[4769]: E1125 11:08:49.198714 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=mariadb-operator-controller-manager-cb6c4fdb7-fknhz_openstack-operators(9d0ef7c9-7421-4fe2-b1c8-551253bea174)\"" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-fknhz" podUID="9d0ef7c9-7421-4fe2-b1c8-551253bea174"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.207477 4769 generic.go:334] "Generic (PLEG): container finished" podID="1f5735dc-67e5-423c-9a8f-d42977c892d3" containerID="c533a1589704ed52ed006ecd3bc686cb379289081ed185a1304a7c12882672b5" exitCode=1
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.207544 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-hh9nc" event={"ID":"1f5735dc-67e5-423c-9a8f-d42977c892d3","Type":"ContainerDied","Data":"c533a1589704ed52ed006ecd3bc686cb379289081ed185a1304a7c12882672b5"}
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.208298 4769 scope.go:117] "RemoveContainer" containerID="c533a1589704ed52ed006ecd3bc686cb379289081ed185a1304a7c12882672b5"
Nov 25 11:08:49 crc kubenswrapper[4769]: E1125 11:08:49.208566 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=keystone-operator-controller-manager-748dc6576f-hh9nc_openstack-operators(1f5735dc-67e5-423c-9a8f-d42977c892d3)\"" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-hh9nc" podUID="1f5735dc-67e5-423c-9a8f-d42977c892d3"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.215894 4769 generic.go:334] "Generic (PLEG): container finished" podID="c4701907-0ccc-4866-bb98-6005539baa95" containerID="b18182edde172f1e0a1f70d42df5bb2b91997cb1df1e264b4f617b7507df06d8" exitCode=1
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.216062 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-fwc8s" event={"ID":"c4701907-0ccc-4866-bb98-6005539baa95","Type":"ContainerDied","Data":"b18182edde172f1e0a1f70d42df5bb2b91997cb1df1e264b4f617b7507df06d8"}
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.217490 4769 scope.go:117] "RemoveContainer" containerID="b18182edde172f1e0a1f70d42df5bb2b91997cb1df1e264b4f617b7507df06d8"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.218325 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.225231 4769 generic.go:334] "Generic (PLEG): container finished" podID="b59cac8b-fb36-4316-ab83-da7202b67af5" containerID="c3296068f0f3100d34066d1fa60c281000f795748c9934e078d92db18c9d1246" exitCode=1
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.225312 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-47gs9" event={"ID":"b59cac8b-fb36-4316-ab83-da7202b67af5","Type":"ContainerDied","Data":"c3296068f0f3100d34066d1fa60c281000f795748c9934e078d92db18c9d1246"}
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.226165 4769 scope.go:117] "RemoveContainer" containerID="c3296068f0f3100d34066d1fa60c281000f795748c9934e078d92db18c9d1246"
Nov 25 11:08:49 crc kubenswrapper[4769]: E1125 11:08:49.226811 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=ironic-operator-controller-manager-5bfcdc958c-47gs9_openstack-operators(b59cac8b-fb36-4316-ab83-da7202b67af5)\"" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-47gs9" podUID="b59cac8b-fb36-4316-ab83-da7202b67af5"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.236703 4769 scope.go:117] "RemoveContainer" containerID="d9746733b1af68e57c0f0b4acdb234fe445aa5b0e603e575247719cfbbf4f531"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.239281 4769 scope.go:117] "RemoveContainer" containerID="75a8806d784335960667ff2525b8d068924d809e301c59b9cebdb36bc3c50322"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.239574 4769 scope.go:117] "RemoveContainer" containerID="19aa616ade2c53210e39d6c8147c08b8a7b9cc9666a609ef0e2a2e1b58af8099"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.243842 4769 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-p4cx9"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.274370 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.285521 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.311781 4769 scope.go:117] "RemoveContainer" containerID="8154a6c938b49fa33eff62181bdd315b08ce8beb4a1fe0b024ac73bf728240ee"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.336151 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.479015 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.551096 4769 scope.go:117] "RemoveContainer" containerID="80a8ac840c40863e509142f82abbd1eac0a87057401260cd1cf0950115419adf"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.628586 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.630440 4769 scope.go:117] "RemoveContainer" containerID="6d68c8b35baf981f2d94ddadea913524158f563f9d15ef8038e939ee85cb6bc5"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.638420 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.668198 4769 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.695490 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.695769 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.702251 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.725582 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-rdrph"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.730389 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-metrics"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.753604 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.756667 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.781461 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.791618 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-mysqld-exporter-svc"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.793944 4769 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.795715 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.800506 4769 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.811625 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.816108 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-whql5"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.881057 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.894281 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.909542 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.915780 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.931198 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.940568 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.966822 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.984185 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-gktzn"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.990787 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret"
Nov 25 11:08:49 crc kubenswrapper[4769]: I1125 11:08:49.999178 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-tzvw8"
Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.021947 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-client-certs"
Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.043527 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides"
Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.065390 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert"
Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.100465 4769 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-9bhp9"
Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.120404 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.122036 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-gateway-ca-bundle"
Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.131331 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"metrics-client-ca"
Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.155503 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt"
Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.177278 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff"
Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.237494 4769 scope.go:117] "RemoveContainer" containerID="3136679e457ea57a5443a8478eb8b66b5f5380f21d180fc6949b77b7dfce4d4a"
Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.237670 4769 scope.go:117] "RemoveContainer" containerID="c1e899e7db8749175807d7e9689a02703d96cd41a40b06f86768cd9bc5c54225"
Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.237886 4769 scope.go:117] "RemoveContainer" containerID="9d286c2176c6173a5a0086a2d9310bcc91bcf6f63925c738b22455d7b99e8e29"
Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.238024 4769 scope.go:117] "RemoveContainer" containerID="32e2745bbc5cdad2789665569e0dc2773da3459d31ff638abb7caa8d8eba887b"
Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.238794 4769 scope.go:117] "RemoveContainer" containerID="0fd74ffbac90ad0f5f9e8b7c88317416a57234b04509525e230ab6f94b0adc83"
Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.239442 4769 scope.go:117] "RemoveContainer" containerID="a23fcc71d50f18f3fe01708b8edccb8721556587c427281e3b2e049cdbb319ad"
Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.258361 4769 generic.go:334] "Generic (PLEG): container finished" podID="8e7436d0-2ff7-4a11-9ab8-74a91e56de4a" containerID="08e2200c2a0a7ac276416be303052b5384f3e7af2792ff6e63217f7e655f64e3" exitCode=1
Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.262852 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-864885998-rz7fl" event={"ID":"8e7436d0-2ff7-4a11-9ab8-74a91e56de4a","Type":"ContainerDied","Data":"08e2200c2a0a7ac276416be303052b5384f3e7af2792ff6e63217f7e655f64e3"}
Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.262921 4769 scope.go:117] "RemoveContainer" containerID="75a8806d784335960667ff2525b8d068924d809e301c59b9cebdb36bc3c50322"
Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.263872 4769 scope.go:117] "RemoveContainer" containerID="08e2200c2a0a7ac276416be303052b5384f3e7af2792ff6e63217f7e655f64e3"
Nov 25 11:08:50 crc kubenswrapper[4769]: E1125 11:08:50.264230 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=watcher-operator-controller-manager-864885998-rz7fl_openstack-operators(8e7436d0-2ff7-4a11-9ab8-74a91e56de4a)\"" pod="openstack-operators/watcher-operator-controller-manager-864885998-rz7fl" podUID="8e7436d0-2ff7-4a11-9ab8-74a91e56de4a"
Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.302785 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data"
Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.315854 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-fwc8s" event={"ID":"c4701907-0ccc-4866-bb98-6005539baa95","Type":"ContainerStarted","Data":"9144ba2f199bcd9441c8760825218bf572bfc1e5baa1eb66e353688e727c1e8b"}
Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.335813 4769 generic.go:334] "Generic (PLEG): container finished" podID="1da948d8-e834-488a-a3ec-a0c0229ebaf5" containerID="d829f0c4de57efa05c5de61329fc2861dac990c010137964537bb483631bfe49" exitCode=1
Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.335881 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-m2qxd" event={"ID":"1da948d8-e834-488a-a3ec-a0c0229ebaf5","Type":"ContainerDied","Data":"d829f0c4de57efa05c5de61329fc2861dac990c010137964537bb483631bfe49"}
Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.336646 4769 scope.go:117] "RemoveContainer" containerID="d829f0c4de57efa05c5de61329fc2861dac990c010137964537bb483631bfe49"
Nov 25 11:08:50 crc kubenswrapper[4769]: E1125 11:08:50.336947 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=horizon-operator-controller-manager-68c9694994-m2qxd_openstack-operators(1da948d8-e834-488a-a3ec-a0c0229ebaf5)\"" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-m2qxd" podUID="1da948d8-e834-488a-a3ec-a0c0229ebaf5"
Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.348795 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"metrics-server-audit-profiles"
Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.352065 4769 generic.go:334] "Generic (PLEG): container finished" podID="aaa65e3e-75e2-4f50-b9d6-aa9710a6e394" containerID="c94dc750ec46009b876bdffd1bbff8dd13523ac00762c75027e94e19fa540798" exitCode=1
Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.352168 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-2hp9n" event={"ID":"aaa65e3e-75e2-4f50-b9d6-aa9710a6e394","Type":"ContainerDied","Data":"c94dc750ec46009b876bdffd1bbff8dd13523ac00762c75027e94e19fa540798"}
Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.352273 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-gqg8c"
Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.352843 4769 scope.go:117] "RemoveContainer" containerID="c94dc750ec46009b876bdffd1bbff8dd13523ac00762c75027e94e19fa540798"
Nov 25 11:08:50 crc kubenswrapper[4769]: E1125 11:08:50.353141 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=cinder-operator-controller-manager-79856dc55c-2hp9n_openstack-operators(aaa65e3e-75e2-4f50-b9d6-aa9710a6e394)\"" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-2hp9n" podUID="aaa65e3e-75e2-4f50-b9d6-aa9710a6e394"
Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.359087 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt"
Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.396291 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt"
Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.424171 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-ui-dashboards"
Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.424197 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert"
Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.425759 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images"
Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.457492 4769 reflector.go:368] Caches
populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.486934 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.489872 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.489870 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-ca-bundle" Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.501324 4769 scope.go:117] "RemoveContainer" containerID="19aa616ade2c53210e39d6c8147c08b8a7b9cc9666a609ef0e2a2e1b58af8099" Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.504011 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.509977 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-p726j" Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.518666 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.540877 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.550601 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"kube-root-ca.crt" Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.557334 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.577894 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.624763 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.701422 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.735458 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-internal-svc" Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.747332 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.747498 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.749313 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"kube-root-ca.crt" Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.758543 4769 scope.go:117] "RemoveContainer" containerID="d9746733b1af68e57c0f0b4acdb234fe445aa5b0e603e575247719cfbbf4f531" Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.765575 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 
25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.785445 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.794806 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.801101 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-distributor-http" Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.801779 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.809862 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.810946 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-g5zsh" Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.811849 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-45xsn" Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.824603 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.844151 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.862492 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-ingester-http" Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.880619 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.896730 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-bl6xm" Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.903201 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.906554 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-w78jc" Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.909408 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-web" Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.926917 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.931639 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.931859 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 25 11:08:50 crc kubenswrapper[4769]: I1125 11:08:50.946828 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-dockercfg-z2l4v" Nov 25 11:08:50 
crc kubenswrapper[4769]: I1125 11:08:50.950013 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.016804 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-grpc-tls-5hacg5kv5cqdb" Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.018334 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.050407 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.105204 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.111255 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-thanos-sidecar-tls" Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.157160 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.160908 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-pgzgx" Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.179814 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.182492 4769 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-4ctxp" Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.189796 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.207061 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.237721 4769 scope.go:117] "RemoveContainer" containerID="23d5359c8568a3cd19ca6542473ddedef1de346a9c83e627f43183311089f7da" Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.291484 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.308287 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-zjtmv" Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.309829 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.357691 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.363990 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.382984 4769 generic.go:334] "Generic (PLEG): container finished" podID="b90cc789-8211-48bc-85cc-1a31ad1af486" containerID="7a45361e992b307f0107e532aa2bf871bc434d1574bff681b3b71afac7d7e53a" exitCode=1 Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.383259 4769 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-zv5xr" event={"ID":"b90cc789-8211-48bc-85cc-1a31ad1af486","Type":"ContainerDied","Data":"7a45361e992b307f0107e532aa2bf871bc434d1574bff681b3b71afac7d7e53a"} Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.383307 4769 scope.go:117] "RemoveContainer" containerID="3136679e457ea57a5443a8478eb8b66b5f5380f21d180fc6949b77b7dfce4d4a" Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.384647 4769 scope.go:117] "RemoveContainer" containerID="7a45361e992b307f0107e532aa2bf871bc434d1574bff681b3b71afac7d7e53a" Nov 25 11:08:51 crc kubenswrapper[4769]: E1125 11:08:51.386045 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=barbican-operator-controller-manager-86dc4d89c8-zv5xr_openstack-operators(b90cc789-8211-48bc-85cc-1a31ad1af486)\"" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-zv5xr" podUID="b90cc789-8211-48bc-85cc-1a31ad1af486" Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.389508 4769 generic.go:334] "Generic (PLEG): container finished" podID="0f8a8be0-04e6-4ef8-b3ce-fb6ced595cbd" containerID="a9f56107a7921df8ef77f73698268fffe3e95324cdf2a5e0399a223d81211db8" exitCode=1 Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.389584 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7xcjk" event={"ID":"0f8a8be0-04e6-4ef8-b3ce-fb6ced595cbd","Type":"ContainerDied","Data":"a9f56107a7921df8ef77f73698268fffe3e95324cdf2a5e0399a223d81211db8"} Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.390209 4769 scope.go:117] "RemoveContainer" containerID="a9f56107a7921df8ef77f73698268fffe3e95324cdf2a5e0399a223d81211db8" Nov 25 11:08:51 crc kubenswrapper[4769]: E1125 11:08:51.390598 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=designate-operator-controller-manager-7d695c9b56-7xcjk_openstack-operators(0f8a8be0-04e6-4ef8-b3ce-fb6ced595cbd)\"" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7xcjk" podUID="0f8a8be0-04e6-4ef8-b3ce-fb6ced595cbd" Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.405540 4769 generic.go:334] "Generic (PLEG): container finished" podID="4894deb0-65ca-4b42-b397-4092a75739c9" containerID="f0c1f3186b2503e0a083f68329051df79ef8faacfbe24d4f3ffc198ebb2b5c18" exitCode=1 Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.405604 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-k2htr" event={"ID":"4894deb0-65ca-4b42-b397-4092a75739c9","Type":"ContainerDied","Data":"f0c1f3186b2503e0a083f68329051df79ef8faacfbe24d4f3ffc198ebb2b5c18"} Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.406307 4769 scope.go:117] "RemoveContainer" containerID="f0c1f3186b2503e0a083f68329051df79ef8faacfbe24d4f3ffc198ebb2b5c18" Nov 25 11:08:51 crc kubenswrapper[4769]: E1125 11:08:51.406558 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=glance-operator-controller-manager-68b95954c9-k2htr_openstack-operators(4894deb0-65ca-4b42-b397-4092a75739c9)\"" 
pod="openstack-operators/glance-operator-controller-manager-68b95954c9-k2htr" podUID="4894deb0-65ca-4b42-b397-4092a75739c9" Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.421139 4769 generic.go:334] "Generic (PLEG): container finished" podID="c4701907-0ccc-4866-bb98-6005539baa95" containerID="9144ba2f199bcd9441c8760825218bf572bfc1e5baa1eb66e353688e727c1e8b" exitCode=1 Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.421209 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-fwc8s" event={"ID":"c4701907-0ccc-4866-bb98-6005539baa95","Type":"ContainerDied","Data":"9144ba2f199bcd9441c8760825218bf572bfc1e5baa1eb66e353688e727c1e8b"} Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.436912 4769 generic.go:334] "Generic (PLEG): container finished" podID="18e1910e-52b2-439b-a93f-4ffe63a7b992" containerID="33ffee1db1abcb01e39cf2f6f963358af41206cf758ac6a6e42b89c43791d7d6" exitCode=1 Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.436998 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wssdn" event={"ID":"18e1910e-52b2-439b-a93f-4ffe63a7b992","Type":"ContainerDied","Data":"33ffee1db1abcb01e39cf2f6f963358af41206cf758ac6a6e42b89c43791d7d6"} Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.437696 4769 scope.go:117] "RemoveContainer" containerID="33ffee1db1abcb01e39cf2f6f963358af41206cf758ac6a6e42b89c43791d7d6" Nov 25 11:08:51 crc kubenswrapper[4769]: E1125 11:08:51.437977 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=swift-operator-controller-manager-6fdc4fcf86-wssdn_openstack-operators(18e1910e-52b2-439b-a93f-4ffe63a7b992)\"" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wssdn" podUID="18e1910e-52b2-439b-a93f-4ffe63a7b992" Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.441548 4769 scope.go:117] "RemoveContainer" containerID="9144ba2f199bcd9441c8760825218bf572bfc1e5baa1eb66e353688e727c1e8b" Nov 25 11:08:51 crc kubenswrapper[4769]: E1125 11:08:51.442149 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cert-manager-cainjector\" with CrashLoopBackOff: \"back-off 10s restarting failed container=cert-manager-cainjector pod=cert-manager-cainjector-7f985d654d-fwc8s_cert-manager(c4701907-0ccc-4866-bb98-6005539baa95)\"" pod="cert-manager/cert-manager-cainjector-7f985d654d-fwc8s" podUID="c4701907-0ccc-4866-bb98-6005539baa95" Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.442488 4769 generic.go:334] "Generic (PLEG): container finished" podID="88cb8ad7-c855-45eb-a471-aacb8c42082c" containerID="dc3cdaff4c0ec1b8b195b88e8e24abf9547700ecdec4a6c0a714c1e76cbe744c" exitCode=1 Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.442537 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-kbhzd" event={"ID":"88cb8ad7-c855-45eb-a471-aacb8c42082c","Type":"ContainerDied","Data":"dc3cdaff4c0ec1b8b195b88e8e24abf9547700ecdec4a6c0a714c1e76cbe744c"} Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.442891 4769 scope.go:117] "RemoveContainer" containerID="dc3cdaff4c0ec1b8b195b88e8e24abf9547700ecdec4a6c0a714c1e76cbe744c" Nov 25 11:08:51 crc kubenswrapper[4769]: E1125 11:08:51.443131 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=octavia-operator-controller-manager-fd75fd47d-kbhzd_openstack-operators(88cb8ad7-c855-45eb-a471-aacb8c42082c)\"" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-kbhzd" podUID="88cb8ad7-c855-45eb-a471-aacb8c42082c" Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.459835 4769 generic.go:334] "Generic (PLEG): container finished" podID="136f8f90-5673-4a08-ab4b-c030c1c428a6" containerID="4819bf42ef924a4f4c1452e05c4935251a8a3a0b3104d56ba98796f9cabc7256" exitCode=1 Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.459879 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-nktn6" event={"ID":"136f8f90-5673-4a08-ab4b-c030c1c428a6","Type":"ContainerDied","Data":"4819bf42ef924a4f4c1452e05c4935251a8a3a0b3104d56ba98796f9cabc7256"} Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.460610 4769 scope.go:117] "RemoveContainer" containerID="4819bf42ef924a4f4c1452e05c4935251a8a3a0b3104d56ba98796f9cabc7256" Nov 25 11:08:51 crc kubenswrapper[4769]: E1125 11:08:51.460871 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=infra-operator-controller-manager-d5cc86f4b-nktn6_openstack-operators(136f8f90-5673-4a08-ab4b-c030c1c428a6)\"" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-nktn6" podUID="136f8f90-5673-4a08-ab4b-c030c1c428a6" Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.515556 4769 scope.go:117] "RemoveContainer" containerID="0fd74ffbac90ad0f5f9e8b7c88317416a57234b04509525e230ab6f94b0adc83" Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.570787 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"openshift-state-metrics-tls" Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.608015 4769 scope.go:117] "RemoveContainer" containerID="c1e899e7db8749175807d7e9689a02703d96cd41a40b06f86768cd9bc5c54225" Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.608861 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.626049 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-public-svc" Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.673735 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.695910 4769 scope.go:117] "RemoveContainer" containerID="b18182edde172f1e0a1f70d42df5bb2b91997cb1df1e264b4f617b7507df06d8" Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.721729 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.747319 4769 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.749707 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.772984 4769 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-config-operator"/"config-operator-serving-cert" Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.782310 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-config-data" Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.795365 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.804043 4769 scope.go:117] "RemoveContainer" containerID="32e2745bbc5cdad2789665569e0dc2773da3459d31ff638abb7caa8d8eba887b" Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.896622 4769 scope.go:117] "RemoveContainer" containerID="a23fcc71d50f18f3fe01708b8edccb8721556587c427281e3b2e049cdbb319ad" Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.899084 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.921184 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.949682 4769 scope.go:117] "RemoveContainer" containerID="9d286c2176c6173a5a0086a2d9310bcc91bcf6f63925c738b22455d7b99e8e29" Nov 25 11:08:51 crc kubenswrapper[4769]: I1125 11:08:51.982570 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 25 11:08:52 crc kubenswrapper[4769]: I1125 11:08:52.015606 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 25 11:08:52 crc kubenswrapper[4769]: I1125 11:08:52.024389 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-token" Nov 25 11:08:52 crc kubenswrapper[4769]: I1125 11:08:52.071400 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 25 11:08:52 crc kubenswrapper[4769]: I1125 11:08:52.120023 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"default-dockercfg-6tstp" Nov 25 11:08:52 crc kubenswrapper[4769]: I1125 11:08:52.157228 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 25 11:08:52 crc kubenswrapper[4769]: I1125 11:08:52.167617 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 25 11:08:52 crc kubenswrapper[4769]: I1125 11:08:52.179381 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 25 11:08:52 crc kubenswrapper[4769]: I1125 11:08:52.183143 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert" Nov 25 11:08:52 crc kubenswrapper[4769]: I1125 11:08:52.222855 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-cfnapi-config-data" Nov 25 11:08:52 crc kubenswrapper[4769]: I1125 11:08:52.291504 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" 
start-of-body= Nov 25 11:08:52 crc kubenswrapper[4769]: I1125 11:08:52.291561 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 11:08:52 crc kubenswrapper[4769]: I1125 11:08:52.301162 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Nov 25 11:08:52 crc kubenswrapper[4769]: I1125 11:08:52.341239 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 11:08:52 crc kubenswrapper[4769]: I1125 11:08:52.364947 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 25 11:08:52 crc kubenswrapper[4769]: I1125 11:08:52.427611 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 25 11:08:52 crc kubenswrapper[4769]: I1125 11:08:52.480919 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 25 11:08:52 crc kubenswrapper[4769]: I1125 11:08:52.490174 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-cxvnb" Nov 25 11:08:52 crc kubenswrapper[4769]: I1125 11:08:52.490423 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-shlvx" Nov 25 11:08:52 crc kubenswrapper[4769]: I1125 11:08:52.500609 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-54cf759cb9-dcqfc" event={"ID":"c1d16c0c-cca9-4794-8a52-c8674d9a069e","Type":"ContainerStarted","Data":"4762455435e4f83bfc1319646986a919b39ef90109cfd01521dc6502c1c47eff"} Nov 25 11:08:52 crc kubenswrapper[4769]: I1125 11:08:52.502069 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-54cf759cb9-dcqfc" Nov 25 11:08:52 crc kubenswrapper[4769]: I1125 11:08:52.533382 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"prometheus-trusted-ca-bundle" Nov 25 11:08:52 crc kubenswrapper[4769]: I1125 11:08:52.579999 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 25 11:08:52 crc kubenswrapper[4769]: I1125 11:08:52.604850 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 25 11:08:52 crc kubenswrapper[4769]: I1125 11:08:52.607340 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"test-operator-controller-priv-key" Nov 25 11:08:52 crc kubenswrapper[4769]: I1125 11:08:52.645488 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 25 11:08:52 crc kubenswrapper[4769]: I1125 11:08:52.696502 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 25 11:08:52 crc kubenswrapper[4769]: I1125 11:08:52.730772 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 25 11:08:52 crc 
kubenswrapper[4769]: I1125 11:08:52.742120 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 25 11:08:52 crc kubenswrapper[4769]: I1125 11:08:52.791100 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Nov 25 11:08:52 crc kubenswrapper[4769]: I1125 11:08:52.853307 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 25 11:08:52 crc kubenswrapper[4769]: I1125 11:08:52.864675 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-config" Nov 25 11:08:52 crc kubenswrapper[4769]: I1125 11:08:52.874781 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Nov 25 11:08:52 crc kubenswrapper[4769]: I1125 11:08:52.904796 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 25 11:08:52 crc kubenswrapper[4769]: I1125 11:08:52.934259 4769 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-l6q4f" Nov 25 11:08:52 crc kubenswrapper[4769]: I1125 11:08:52.940272 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 25 11:08:52 crc kubenswrapper[4769]: I1125 11:08:52.945401 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-public-svc" Nov 25 11:08:53 crc kubenswrapper[4769]: I1125 11:08:53.000157 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 25 11:08:53 crc kubenswrapper[4769]: I1125 11:08:53.035796 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 25 11:08:53 crc kubenswrapper[4769]: I1125 11:08:53.094609 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 25 11:08:53 crc kubenswrapper[4769]: I1125 11:08:53.113751 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-api-config-data" Nov 25 11:08:53 crc kubenswrapper[4769]: I1125 11:08:53.136517 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-config" Nov 25 11:08:53 crc kubenswrapper[4769]: I1125 11:08:53.237191 4769 scope.go:117] "RemoveContainer" containerID="f682d6ba2b3a76e5b33560b16ccf1b9209e6341d4926ea2e556ef1f28310d774" Nov 25 11:08:53 crc kubenswrapper[4769]: E1125 11:08:53.237557 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=metallb-operator-controller-manager-64569bb78d-pzqdq_metallb-system(9441dbc7-716c-413e-b0ea-bf1ef05b1608)\"" pod="metallb-system/metallb-operator-controller-manager-64569bb78d-pzqdq" podUID="9441dbc7-716c-413e-b0ea-bf1ef05b1608" Nov 25 11:08:53 crc kubenswrapper[4769]: I1125 11:08:53.330289 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 25 11:08:53 crc kubenswrapper[4769]: I1125 11:08:53.358534 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 25 11:08:53 crc kubenswrapper[4769]: I1125 11:08:53.384680 
4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Nov 25 11:08:53 crc kubenswrapper[4769]: I1125 11:08:53.392710 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 25 11:08:53 crc kubenswrapper[4769]: I1125 11:08:53.465719 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 25 11:08:53 crc kubenswrapper[4769]: I1125 11:08:53.522109 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 25 11:08:53 crc kubenswrapper[4769]: I1125 11:08:53.539204 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 25 11:08:53 crc kubenswrapper[4769]: I1125 11:08:53.553819 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 25 11:08:53 crc kubenswrapper[4769]: I1125 11:08:53.554685 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 25 11:08:53 crc kubenswrapper[4769]: I1125 11:08:53.587540 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 25 11:08:53 crc kubenswrapper[4769]: I1125 11:08:53.611738 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 25 11:08:53 crc kubenswrapper[4769]: I1125 11:08:53.630008 4769 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-dx6j5" Nov 25 11:08:53 crc kubenswrapper[4769]: I1125 11:08:53.766917 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 25 11:08:53 crc kubenswrapper[4769]: I1125 11:08:53.786883 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 25 11:08:53 crc kubenswrapper[4769]: I1125 11:08:53.850098 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 25 11:08:53 crc kubenswrapper[4769]: I1125 11:08:53.854737 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 25 11:08:53 crc kubenswrapper[4769]: I1125 11:08:53.855542 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-dockercfg-qn2th" Nov 25 11:08:53 crc kubenswrapper[4769]: I1125 11:08:53.967615 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 25 11:08:54 crc kubenswrapper[4769]: I1125 11:08:54.056472 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-dockercfg-wtzkt" Nov 25 11:08:54 crc kubenswrapper[4769]: I1125 11:08:54.097394 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 25 11:08:54 crc kubenswrapper[4769]: I1125 11:08:54.099051 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 25 11:08:54 crc kubenswrapper[4769]: I1125 11:08:54.103312 4769 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-logging"/"openshift-service-ca.crt" Nov 25 11:08:54 crc kubenswrapper[4769]: I1125 11:08:54.111332 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-trs84" Nov 25 11:08:54 crc kubenswrapper[4769]: I1125 11:08:54.145672 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 25 11:08:54 crc kubenswrapper[4769]: I1125 11:08:54.184119 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 25 11:08:54 crc kubenswrapper[4769]: I1125 11:08:54.237218 4769 scope.go:117] "RemoveContainer" containerID="bc469b6674d8415450a3a7ac3d780f092d9fec2269ea54e1291e18eff7c1f0b4" Nov 25 11:08:54 crc kubenswrapper[4769]: I1125 11:08:54.245747 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-internal-svc" Nov 25 11:08:54 crc kubenswrapper[4769]: I1125 11:08:54.252503 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-cdf4f" Nov 25 11:08:54 crc kubenswrapper[4769]: I1125 11:08:54.252615 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"kube-state-metrics-custom-resource-state-configmap" Nov 25 11:08:54 crc kubenswrapper[4769]: I1125 11:08:54.256299 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 25 11:08:54 crc kubenswrapper[4769]: I1125 11:08:54.390658 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 25 11:08:54 crc kubenswrapper[4769]: I1125 11:08:54.434225 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-grpc-tls-4n0884lam7386" Nov 25 11:08:54 crc kubenswrapper[4769]: I1125 11:08:54.480517 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-index-gateway-http" Nov 25 11:08:54 crc kubenswrapper[4769]: I1125 11:08:54.650775 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 25 11:08:54 crc kubenswrapper[4769]: I1125 11:08:54.727477 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Nov 25 11:08:54 crc kubenswrapper[4769]: I1125 11:08:54.816788 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Nov 25 11:08:54 crc kubenswrapper[4769]: I1125 11:08:54.828813 4769 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-plqb6" Nov 25 11:08:54 crc kubenswrapper[4769]: I1125 11:08:54.898070 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.001080 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-zv5xr" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.001139 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-zv5xr" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.001948 4769 
scope.go:117] "RemoveContainer" containerID="7a45361e992b307f0107e532aa2bf871bc434d1574bff681b3b71afac7d7e53a" Nov 25 11:08:55 crc kubenswrapper[4769]: E1125 11:08:55.002310 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=barbican-operator-controller-manager-86dc4d89c8-zv5xr_openstack-operators(b90cc789-8211-48bc-85cc-1a31ad1af486)\"" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-zv5xr" podUID="b90cc789-8211-48bc-85cc-1a31ad1af486" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.008922 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-2hp9n" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.009032 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-2hp9n" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.010015 4769 scope.go:117] "RemoveContainer" containerID="c94dc750ec46009b876bdffd1bbff8dd13523ac00762c75027e94e19fa540798" Nov 25 11:08:55 crc kubenswrapper[4769]: E1125 11:08:55.010388 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=cinder-operator-controller-manager-79856dc55c-2hp9n_openstack-operators(aaa65e3e-75e2-4f50-b9d6-aa9710a6e394)\"" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-2hp9n" podUID="aaa65e3e-75e2-4f50-b9d6-aa9710a6e394" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.087214 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7xcjk" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.087269 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7xcjk" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.088046 4769 scope.go:117] "RemoveContainer" containerID="a9f56107a7921df8ef77f73698268fffe3e95324cdf2a5e0399a223d81211db8" Nov 25 11:08:55 crc kubenswrapper[4769]: E1125 11:08:55.088317 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=designate-operator-controller-manager-7d695c9b56-7xcjk_openstack-operators(0f8a8be0-04e6-4ef8-b3ce-fb6ced595cbd)\"" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7xcjk" podUID="0f8a8be0-04e6-4ef8-b3ce-fb6ced595cbd" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.131719 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-k2htr" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.132717 4769 scope.go:117] "RemoveContainer" containerID="f0c1f3186b2503e0a083f68329051df79ef8faacfbe24d4f3ffc198ebb2b5c18" Nov 25 11:08:55 crc kubenswrapper[4769]: E1125 11:08:55.132992 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=glance-operator-controller-manager-68b95954c9-k2htr_openstack-operators(4894deb0-65ca-4b42-b397-4092a75739c9)\"" 
pod="openstack-operators/glance-operator-controller-manager-68b95954c9-k2htr" podUID="4894deb0-65ca-4b42-b397-4092a75739c9" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.133310 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-k2htr" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.191297 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/heat-operator-controller-manager-774b86978c-w8ftq" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.191360 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-774b86978c-w8ftq" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.192296 4769 scope.go:117] "RemoveContainer" containerID="7c3380e5b07d625f1dcad94641ead927af8109d9dcb2f314fb8dd4154820908b" Nov 25 11:08:55 crc kubenswrapper[4769]: E1125 11:08:55.192610 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=heat-operator-controller-manager-774b86978c-w8ftq_openstack-operators(67abea47-5e8a-43a2-8865-929cfdfc607c)\"" pod="openstack-operators/heat-operator-controller-manager-774b86978c-w8ftq" podUID="67abea47-5e8a-43a2-8865-929cfdfc607c" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.232801 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-m2qxd" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.232941 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-m2qxd" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.233752 4769 scope.go:117] "RemoveContainer" containerID="d829f0c4de57efa05c5de61329fc2861dac990c010137964537bb483631bfe49" Nov 25 11:08:55 crc kubenswrapper[4769]: E1125 11:08:55.234102 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=horizon-operator-controller-manager-68c9694994-m2qxd_openstack-operators(1da948d8-e834-488a-a3ec-a0c0229ebaf5)\"" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-m2qxd" podUID="1da948d8-e834-488a-a3ec-a0c0229ebaf5" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.267489 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-tls" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.462065 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-47gs9" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.462112 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-47gs9" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.463051 4769 scope.go:117] "RemoveContainer" containerID="c3296068f0f3100d34066d1fa60c281000f795748c9934e078d92db18c9d1246" Nov 25 11:08:55 crc kubenswrapper[4769]: E1125 11:08:55.463820 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager 
pod=ironic-operator-controller-manager-5bfcdc958c-47gs9_openstack-operators(b59cac8b-fb36-4316-ab83-da7202b67af5)\"" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-47gs9" podUID="b59cac8b-fb36-4316-ab83-da7202b67af5" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.502269 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-hh9nc" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.503117 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-hh9nc" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.503184 4769 scope.go:117] "RemoveContainer" containerID="c533a1589704ed52ed006ecd3bc686cb379289081ed185a1304a7c12882672b5" Nov 25 11:08:55 crc kubenswrapper[4769]: E1125 11:08:55.503653 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=keystone-operator-controller-manager-748dc6576f-hh9nc_openstack-operators(1f5735dc-67e5-423c-9a8f-d42977c892d3)\"" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-hh9nc" podUID="1f5735dc-67e5-423c-9a8f-d42977c892d3" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.557652 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-qdjsd" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.557718 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-qdjsd" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.558623 4769 scope.go:117] "RemoveContainer" containerID="bdca6b56630d0dc81232cbaf70ba4fb1dda827a4b73883e5cce0ce5cb4a5a6e4" Nov 25 11:08:55 crc kubenswrapper[4769]: E1125 11:08:55.559093 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=manila-operator-controller-manager-58bb8d67cc-qdjsd_openstack-operators(63e66921-47a7-407a-b50e-06cf5cadb8be)\"" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-qdjsd" podUID="63e66921-47a7-407a-b50e-06cf5cadb8be" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.588650 4769 scope.go:117] "RemoveContainer" containerID="c533a1589704ed52ed006ecd3bc686cb379289081ed185a1304a7c12882672b5" Nov 25 11:08:55 crc kubenswrapper[4769]: E1125 11:08:55.588997 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=keystone-operator-controller-manager-748dc6576f-hh9nc_openstack-operators(1f5735dc-67e5-423c-9a8f-d42977c892d3)\"" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-hh9nc" podUID="1f5735dc-67e5-423c-9a8f-d42977c892d3" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.589505 4769 scope.go:117] "RemoveContainer" containerID="f0c1f3186b2503e0a083f68329051df79ef8faacfbe24d4f3ffc198ebb2b5c18" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.589779 4769 scope.go:117] "RemoveContainer" containerID="d829f0c4de57efa05c5de61329fc2861dac990c010137964537bb483631bfe49" Nov 25 11:08:55 crc kubenswrapper[4769]: E1125 11:08:55.590084 4769 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=horizon-operator-controller-manager-68c9694994-m2qxd_openstack-operators(1da948d8-e834-488a-a3ec-a0c0229ebaf5)\"" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-m2qxd" podUID="1da948d8-e834-488a-a3ec-a0c0229ebaf5" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.590131 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-6c6f9bd7cc-cp48g" event={"ID":"70f81d0a-db58-4bd4-a0e2-ee1c03e2f923","Type":"ContainerStarted","Data":"d8aa7416389a4b83a0d392f1e808b4af06d0401cd88292c9014139f1a871f1d2"} Nov 25 11:08:55 crc kubenswrapper[4769]: E1125 11:08:55.590316 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=glance-operator-controller-manager-68b95954c9-k2htr_openstack-operators(4894deb0-65ca-4b42-b397-4092a75739c9)\"" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-k2htr" podUID="4894deb0-65ca-4b42-b397-4092a75739c9" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.590679 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-6c6f9bd7cc-cp48g" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.603823 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-fknhz" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.604754 4769 scope.go:117] "RemoveContainer" containerID="836ee524fe136e22b1a4cdee89d7bdc2bac71512f4048a2178fe46c04b9cb5b5" Nov 25 11:08:55 crc kubenswrapper[4769]: E1125 11:08:55.605089 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=mariadb-operator-controller-manager-cb6c4fdb7-fknhz_openstack-operators(9d0ef7c9-7421-4fe2-b1c8-551253bea174)\"" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-fknhz" podUID="9d0ef7c9-7421-4fe2-b1c8-551253bea174" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.605569 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-fknhz" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.650010 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.672788 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.694488 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-rgn2z" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.694534 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-rgn2z" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.695394 4769 scope.go:117] "RemoveContainer" containerID="192b50f0ff9ad2e8a1a9e7106cbcdd244285e3bd8f176702a77627bfa9a67045" Nov 25 11:08:55 crc kubenswrapper[4769]: E1125 11:08:55.695718 4769 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=neutron-operator-controller-manager-7c57c8bbc4-rgn2z_openstack-operators(6d9be953-34ea-4956-96bf-84d5f8babb2d)\"" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-rgn2z" podUID="6d9be953-34ea-4956-96bf-84d5f8babb2d" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.728126 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rcv4n" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.728176 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rcv4n" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.729042 4769 scope.go:117] "RemoveContainer" containerID="3ec051fc18a0ed5dd205e14155e6b88e962ed8ef7f45b098b1083440ef362933" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.729415 4769 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 25 11:08:55 crc kubenswrapper[4769]: E1125 11:08:55.729571 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=nova-operator-controller-manager-79556f57fc-rcv4n_openstack-operators(1b8cd25d-43dd-4774-b1d9-59572bb6bef7)\"" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rcv4n" podUID="1b8cd25d-43dd-4774-b1d9-59572bb6bef7" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.731659 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podStartSLOduration=48.731634908 podStartE2EDuration="48.731634908s" podCreationTimestamp="2025-11-25 11:08:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 11:08:27.635787378 +0000 UTC m=+5056.220759701" watchObservedRunningTime="2025-11-25 11:08:55.731634908 +0000 UTC m=+5084.316607221" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.746885 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.746944 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.758712 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.768513 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.769985 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=28.769940123 podStartE2EDuration="28.769940123s" podCreationTimestamp="2025-11-25 11:08:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 11:08:55.768460554 +0000 UTC m=+5084.353432877" watchObservedRunningTime="2025-11-25 11:08:55.769940123 +0000 UTC m=+5084.354912436" Nov 25 11:08:55 crc kubenswrapper[4769]: 
I1125 11:08:55.797444 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.810365 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-kbhzd" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.810449 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-4rmv5" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.811231 4769 scope.go:117] "RemoveContainer" containerID="dc3cdaff4c0ec1b8b195b88e8e24abf9547700ecdec4a6c0a714c1e76cbe744c" Nov 25 11:08:55 crc kubenswrapper[4769]: E1125 11:08:55.811569 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=octavia-operator-controller-manager-fd75fd47d-kbhzd_openstack-operators(88cb8ad7-c855-45eb-a471-aacb8c42082c)\"" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-kbhzd" podUID="88cb8ad7-c855-45eb-a471-aacb8c42082c" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.813192 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.816236 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-kbhzd" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.869375 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-68bvf" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.869419 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-68bvf" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.870219 4769 scope.go:117] "RemoveContainer" containerID="ade37617c9c8ca458df14d6683e467d37c8e17a1a4f94a5b4006aa7618e78c7b" Nov 25 11:08:55 crc kubenswrapper[4769]: E1125 11:08:55.870555 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=ovn-operator-controller-manager-66cf5c67ff-68bvf_openstack-operators(a2f1ad69-27e4-4131-a742-a8d2c5df8636)\"" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-68bvf" podUID="a2f1ad69-27e4-4131-a742-a8d2c5df8636" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.917719 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.922177 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-g8xqz" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.922225 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-g8xqz" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.923168 4769 scope.go:117] "RemoveContainer" containerID="334e5a38c2dddecc1080d89fdb35cd17e4414bb3157ba3bd0dfff520a809643a" Nov 25 11:08:55 crc kubenswrapper[4769]: E1125 11:08:55.923594 4769 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=placement-operator-controller-manager-5db546f9d9-g8xqz_openstack-operators(1be22f03-8697-413b-922c-9344185c05c4)\"" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-g8xqz" podUID="1be22f03-8697-413b-922c-9344185c05c4" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.940687 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-server-dockercfg-hvtwr" Nov 25 11:08:55 crc kubenswrapper[4769]: I1125 11:08:55.946231 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 25 11:08:56 crc kubenswrapper[4769]: I1125 11:08:56.047867 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wssdn" Nov 25 11:08:56 crc kubenswrapper[4769]: I1125 11:08:56.049210 4769 scope.go:117] "RemoveContainer" containerID="33ffee1db1abcb01e39cf2f6f963358af41206cf758ac6a6e42b89c43791d7d6" Nov 25 11:08:56 crc kubenswrapper[4769]: E1125 11:08:56.049457 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=swift-operator-controller-manager-6fdc4fcf86-wssdn_openstack-operators(18e1910e-52b2-439b-a93f-4ffe63a7b992)\"" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wssdn" podUID="18e1910e-52b2-439b-a93f-4ffe63a7b992" Nov 25 11:08:56 crc kubenswrapper[4769]: I1125 11:08:56.051670 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wssdn" Nov 25 11:08:56 crc kubenswrapper[4769]: I1125 11:08:56.088012 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 25 11:08:56 crc kubenswrapper[4769]: I1125 11:08:56.168113 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 25 11:08:56 crc kubenswrapper[4769]: I1125 11:08:56.192073 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-54cf759cb9-dcqfc" Nov 25 11:08:56 crc kubenswrapper[4769]: I1125 11:08:56.257505 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 25 11:08:56 crc kubenswrapper[4769]: I1125 11:08:56.336573 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-864885998-rz7fl" Nov 25 11:08:56 crc kubenswrapper[4769]: I1125 11:08:56.336941 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/watcher-operator-controller-manager-864885998-rz7fl" Nov 25 11:08:56 crc kubenswrapper[4769]: I1125 11:08:56.337900 4769 scope.go:117] "RemoveContainer" containerID="08e2200c2a0a7ac276416be303052b5384f3e7af2792ff6e63217f7e655f64e3" Nov 25 11:08:56 crc kubenswrapper[4769]: E1125 11:08:56.338284 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager 
pod=watcher-operator-controller-manager-864885998-rz7fl_openstack-operators(8e7436d0-2ff7-4a11-9ab8-74a91e56de4a)\"" pod="openstack-operators/watcher-operator-controller-manager-864885998-rz7fl" podUID="8e7436d0-2ff7-4a11-9ab8-74a91e56de4a" Nov 25 11:08:56 crc kubenswrapper[4769]: I1125 11:08:56.600209 4769 scope.go:117] "RemoveContainer" containerID="dc3cdaff4c0ec1b8b195b88e8e24abf9547700ecdec4a6c0a714c1e76cbe744c" Nov 25 11:08:56 crc kubenswrapper[4769]: E1125 11:08:56.600541 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=octavia-operator-controller-manager-fd75fd47d-kbhzd_openstack-operators(88cb8ad7-c855-45eb-a471-aacb8c42082c)\"" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-kbhzd" podUID="88cb8ad7-c855-45eb-a471-aacb8c42082c" Nov 25 11:08:56 crc kubenswrapper[4769]: I1125 11:08:56.600583 4769 scope.go:117] "RemoveContainer" containerID="33ffee1db1abcb01e39cf2f6f963358af41206cf758ac6a6e42b89c43791d7d6" Nov 25 11:08:56 crc kubenswrapper[4769]: E1125 11:08:56.600977 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=swift-operator-controller-manager-6fdc4fcf86-wssdn_openstack-operators(18e1910e-52b2-439b-a93f-4ffe63a7b992)\"" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wssdn" podUID="18e1910e-52b2-439b-a93f-4ffe63a7b992" Nov 25 11:08:56 crc kubenswrapper[4769]: I1125 11:08:56.601324 4769 scope.go:117] "RemoveContainer" containerID="836ee524fe136e22b1a4cdee89d7bdc2bac71512f4048a2178fe46c04b9cb5b5" Nov 25 11:08:56 crc kubenswrapper[4769]: E1125 11:08:56.601678 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=mariadb-operator-controller-manager-cb6c4fdb7-fknhz_openstack-operators(9d0ef7c9-7421-4fe2-b1c8-551253bea174)\"" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-fknhz" podUID="9d0ef7c9-7421-4fe2-b1c8-551253bea174" Nov 25 11:08:56 crc kubenswrapper[4769]: I1125 11:08:56.816997 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-nktn6" Nov 25 11:08:56 crc kubenswrapper[4769]: I1125 11:08:56.817088 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-nktn6" Nov 25 11:08:56 crc kubenswrapper[4769]: I1125 11:08:56.817878 4769 scope.go:117] "RemoveContainer" containerID="4819bf42ef924a4f4c1452e05c4935251a8a3a0b3104d56ba98796f9cabc7256" Nov 25 11:08:56 crc kubenswrapper[4769]: E1125 11:08:56.818232 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=infra-operator-controller-manager-d5cc86f4b-nktn6_openstack-operators(136f8f90-5673-4a08-ab4b-c030c1c428a6)\"" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-nktn6" podUID="136f8f90-5673-4a08-ab4b-c030c1c428a6" Nov 25 11:08:56 crc kubenswrapper[4769]: I1125 11:08:56.960676 4769 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 25 11:08:57 crc kubenswrapper[4769]: I1125 11:08:57.024151 4769 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Nov 25 11:08:57 crc kubenswrapper[4769]: I1125 11:08:57.560572 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-gt8nw" Nov 25 11:08:57 crc kubenswrapper[4769]: I1125 11:08:57.608157 4769 scope.go:117] "RemoveContainer" containerID="4819bf42ef924a4f4c1452e05c4935251a8a3a0b3104d56ba98796f9cabc7256" Nov 25 11:08:57 crc kubenswrapper[4769]: E1125 11:08:57.608515 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=infra-operator-controller-manager-d5cc86f4b-nktn6_openstack-operators(136f8f90-5673-4a08-ab4b-c030c1c428a6)\"" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-nktn6" podUID="136f8f90-5673-4a08-ab4b-c030c1c428a6" Nov 25 11:08:58 crc kubenswrapper[4769]: I1125 11:08:58.279791 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 11:08:58 crc kubenswrapper[4769]: I1125 11:08:58.284432 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 11:08:59 crc kubenswrapper[4769]: I1125 11:08:59.238289 4769 scope.go:117] "RemoveContainer" containerID="f085586e097d99e6a0130457fdecb9ff7a0569323a0150618c6a97dc4980df25" Nov 25 11:08:59 crc kubenswrapper[4769]: E1125 11:08:59.238935 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=operator pod=rabbitmq-cluster-operator-manager-668c99d594-bzrbr_openstack-operators(a032414e-4be2-47f7-ac88-3bdec0ccb151)\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-bzrbr" podUID="a032414e-4be2-47f7-ac88-3bdec0ccb151" Nov 25 11:08:59 crc kubenswrapper[4769]: I1125 11:08:59.630558 4769 generic.go:334] "Generic (PLEG): container finished" podID="8002485a-0573-48c9-aeac-f2f5a05cb1ae" containerID="ecb89a83f4fb31c70eae254318d1595d248d0c18445808dacb1d67f50209f547" exitCode=1 Nov 25 11:08:59 crc kubenswrapper[4769]: I1125 11:08:59.630617 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-662dt" event={"ID":"8002485a-0573-48c9-aeac-f2f5a05cb1ae","Type":"ContainerDied","Data":"ecb89a83f4fb31c70eae254318d1595d248d0c18445808dacb1d67f50209f547"} Nov 25 11:08:59 crc kubenswrapper[4769]: I1125 11:08:59.631448 4769 scope.go:117] "RemoveContainer" containerID="ecb89a83f4fb31c70eae254318d1595d248d0c18445808dacb1d67f50209f547" Nov 25 11:09:00 crc kubenswrapper[4769]: I1125 11:09:00.237367 4769 scope.go:117] "RemoveContainer" containerID="8ee70c6e5c00eca7899e4fcf8a105e2ff96534ff2ec995e3b08b51fc6d6c9920" Nov 25 11:09:00 crc kubenswrapper[4769]: I1125 11:09:00.260997 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-6c6f9bd7cc-cp48g" Nov 25 11:09:00 crc kubenswrapper[4769]: I1125 11:09:00.643034 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-662dt" event={"ID":"8002485a-0573-48c9-aeac-f2f5a05cb1ae","Type":"ContainerStarted","Data":"4b4f870f0279f2141ada3749a37082b14e9cc2e21b806f495e65b0e1abf92ec5"} Nov 25 11:09:01 crc kubenswrapper[4769]: I1125 11:09:01.591163 4769 kubelet.go:2431] 
"SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 25 11:09:01 crc kubenswrapper[4769]: I1125 11:09:01.591775 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://08c10fa3fd260fed5e22367d730a2ad2c2029b34578d012f2e114280cb9bd700" gracePeriod=5 Nov 25 11:09:02 crc kubenswrapper[4769]: I1125 11:09:02.665700 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9","Type":"ContainerStarted","Data":"65d5adcd4ed95c6c56674989f405efa50395172e954c02de07fcb35d8785961b"} Nov 25 11:09:02 crc kubenswrapper[4769]: I1125 11:09:02.666461 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 25 11:09:03 crc kubenswrapper[4769]: I1125 11:09:03.237390 4769 scope.go:117] "RemoveContainer" containerID="9144ba2f199bcd9441c8760825218bf572bfc1e5baa1eb66e353688e727c1e8b" Nov 25 11:09:03 crc kubenswrapper[4769]: I1125 11:09:03.676550 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-fwc8s" event={"ID":"c4701907-0ccc-4866-bb98-6005539baa95","Type":"ContainerStarted","Data":"ab3699a9ed7cae37ccd8963ca797d9d2b8304ab892a56ea949d1ad8a5f94f35e"} Nov 25 11:09:06 crc kubenswrapper[4769]: I1125 11:09:06.256725 4769 scope.go:117] "RemoveContainer" containerID="f0c1f3186b2503e0a083f68329051df79ef8faacfbe24d4f3ffc198ebb2b5c18" Nov 25 11:09:06 crc kubenswrapper[4769]: I1125 11:09:06.257989 4769 scope.go:117] "RemoveContainer" containerID="c94dc750ec46009b876bdffd1bbff8dd13523ac00762c75027e94e19fa540798" Nov 25 11:09:06 crc kubenswrapper[4769]: E1125 11:09:06.258107 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=glance-operator-controller-manager-68b95954c9-k2htr_openstack-operators(4894deb0-65ca-4b42-b397-4092a75739c9)\"" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-k2htr" podUID="4894deb0-65ca-4b42-b397-4092a75739c9" Nov 25 11:09:06 crc kubenswrapper[4769]: E1125 11:09:06.258753 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=cinder-operator-controller-manager-79856dc55c-2hp9n_openstack-operators(aaa65e3e-75e2-4f50-b9d6-aa9710a6e394)\"" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-2hp9n" podUID="aaa65e3e-75e2-4f50-b9d6-aa9710a6e394" Nov 25 11:09:07 crc kubenswrapper[4769]: I1125 11:09:07.236477 4769 scope.go:117] "RemoveContainer" containerID="a9f56107a7921df8ef77f73698268fffe3e95324cdf2a5e0399a223d81211db8" Nov 25 11:09:07 crc kubenswrapper[4769]: I1125 11:09:07.236506 4769 scope.go:117] "RemoveContainer" containerID="f682d6ba2b3a76e5b33560b16ccf1b9209e6341d4926ea2e556ef1f28310d774" Nov 25 11:09:07 crc kubenswrapper[4769]: I1125 11:09:07.236628 4769 scope.go:117] "RemoveContainer" containerID="dc3cdaff4c0ec1b8b195b88e8e24abf9547700ecdec4a6c0a714c1e76cbe744c" Nov 25 11:09:07 crc kubenswrapper[4769]: I1125 11:09:07.236674 4769 scope.go:117] "RemoveContainer" containerID="c3296068f0f3100d34066d1fa60c281000f795748c9934e078d92db18c9d1246" Nov 25 11:09:07 crc kubenswrapper[4769]: E1125 
11:09:07.236834 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=designate-operator-controller-manager-7d695c9b56-7xcjk_openstack-operators(0f8a8be0-04e6-4ef8-b3ce-fb6ced595cbd)\"" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7xcjk" podUID="0f8a8be0-04e6-4ef8-b3ce-fb6ced595cbd" Nov 25 11:09:07 crc kubenswrapper[4769]: E1125 11:09:07.236924 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=octavia-operator-controller-manager-fd75fd47d-kbhzd_openstack-operators(88cb8ad7-c855-45eb-a471-aacb8c42082c)\"" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-kbhzd" podUID="88cb8ad7-c855-45eb-a471-aacb8c42082c" Nov 25 11:09:07 crc kubenswrapper[4769]: E1125 11:09:07.236931 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=ironic-operator-controller-manager-5bfcdc958c-47gs9_openstack-operators(b59cac8b-fb36-4316-ab83-da7202b67af5)\"" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-47gs9" podUID="b59cac8b-fb36-4316-ab83-da7202b67af5" Nov 25 11:09:07 crc kubenswrapper[4769]: I1125 11:09:07.721069 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 25 11:09:07 crc kubenswrapper[4769]: I1125 11:09:07.721376 4769 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="08c10fa3fd260fed5e22367d730a2ad2c2029b34578d012f2e114280cb9bd700" exitCode=137 Nov 25 11:09:08 crc kubenswrapper[4769]: I1125 11:09:08.083635 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 25 11:09:08 crc kubenswrapper[4769]: I1125 11:09:08.085232 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 11:09:08 crc kubenswrapper[4769]: I1125 11:09:08.229994 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 11:09:08 crc kubenswrapper[4769]: I1125 11:09:08.230149 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 11:09:08 crc kubenswrapper[4769]: I1125 11:09:08.230179 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 11:09:08 crc kubenswrapper[4769]: I1125 11:09:08.230419 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 11:09:08 crc kubenswrapper[4769]: I1125 11:09:08.230458 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 11:09:08 crc kubenswrapper[4769]: I1125 11:09:08.231921 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 11:09:08 crc kubenswrapper[4769]: I1125 11:09:08.231949 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 11:09:08 crc kubenswrapper[4769]: I1125 11:09:08.231992 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 11:09:08 crc kubenswrapper[4769]: I1125 11:09:08.232028 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 11:09:08 crc kubenswrapper[4769]: I1125 11:09:08.237458 4769 scope.go:117] "RemoveContainer" containerID="7c3380e5b07d625f1dcad94641ead927af8109d9dcb2f314fb8dd4154820908b" Nov 25 11:09:08 crc kubenswrapper[4769]: I1125 11:09:08.237898 4769 scope.go:117] "RemoveContainer" containerID="192b50f0ff9ad2e8a1a9e7106cbcdd244285e3bd8f176702a77627bfa9a67045" Nov 25 11:09:08 crc kubenswrapper[4769]: I1125 11:09:08.238228 4769 scope.go:117] "RemoveContainer" containerID="334e5a38c2dddecc1080d89fdb35cd17e4414bb3157ba3bd0dfff520a809643a" Nov 25 11:09:08 crc kubenswrapper[4769]: I1125 11:09:08.238673 4769 scope.go:117] "RemoveContainer" containerID="d829f0c4de57efa05c5de61329fc2861dac990c010137964537bb483631bfe49" Nov 25 11:09:08 crc kubenswrapper[4769]: I1125 11:09:08.238815 4769 scope.go:117] "RemoveContainer" containerID="33ffee1db1abcb01e39cf2f6f963358af41206cf758ac6a6e42b89c43791d7d6" Nov 25 11:09:08 crc kubenswrapper[4769]: E1125 11:09:08.238895 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=horizon-operator-controller-manager-68c9694994-m2qxd_openstack-operators(1da948d8-e834-488a-a3ec-a0c0229ebaf5)\"" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-m2qxd" podUID="1da948d8-e834-488a-a3ec-a0c0229ebaf5" Nov 25 11:09:08 crc kubenswrapper[4769]: E1125 11:09:08.239105 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=swift-operator-controller-manager-6fdc4fcf86-wssdn_openstack-operators(18e1910e-52b2-439b-a93f-4ffe63a7b992)\"" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wssdn" podUID="18e1910e-52b2-439b-a93f-4ffe63a7b992" Nov 25 11:09:08 crc kubenswrapper[4769]: I1125 11:09:08.246171 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 11:09:08 crc kubenswrapper[4769]: I1125 11:09:08.248594 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Nov 25 11:09:08 crc kubenswrapper[4769]: I1125 11:09:08.248818 4769 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="" Nov 25 11:09:08 crc kubenswrapper[4769]: I1125 11:09:08.265871 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 25 11:09:08 crc kubenswrapper[4769]: I1125 11:09:08.265901 4769 kubelet.go:2649] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="bdcbede9-ce58-4376-b67f-f3993e7b5f6b" Nov 25 11:09:08 crc kubenswrapper[4769]: I1125 11:09:08.282044 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 25 11:09:08 crc kubenswrapper[4769]: I1125 11:09:08.282081 4769 kubelet.go:2673] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="bdcbede9-ce58-4376-b67f-f3993e7b5f6b" Nov 25 11:09:08 crc kubenswrapper[4769]: I1125 11:09:08.333576 4769 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 25 11:09:08 crc kubenswrapper[4769]: I1125 11:09:08.333607 4769 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Nov 25 11:09:08 crc kubenswrapper[4769]: I1125 11:09:08.333616 4769 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 25 11:09:08 crc kubenswrapper[4769]: I1125 11:09:08.333624 4769 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Nov 25 11:09:08 crc kubenswrapper[4769]: I1125 11:09:08.333634 4769 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Nov 25 11:09:08 crc kubenswrapper[4769]: I1125 11:09:08.735828 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-774b86978c-w8ftq" event={"ID":"67abea47-5e8a-43a2-8865-929cfdfc607c","Type":"ContainerStarted","Data":"64a179fd79d5da66ddd86d78ff4d7530ca9f4de930e5ccc4770806ca6853283f"} Nov 25 11:09:08 crc kubenswrapper[4769]: I1125 11:09:08.736407 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-774b86978c-w8ftq" Nov 25 11:09:08 crc kubenswrapper[4769]: I1125 11:09:08.743642 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-g8xqz" 
event={"ID":"1be22f03-8697-413b-922c-9344185c05c4","Type":"ContainerStarted","Data":"19dd9247160b7adaf62953334dc0a9e3c50ceb44d847cf05ce2c630319bc5928"} Nov 25 11:09:08 crc kubenswrapper[4769]: I1125 11:09:08.743897 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-g8xqz" Nov 25 11:09:08 crc kubenswrapper[4769]: I1125 11:09:08.746609 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-64569bb78d-pzqdq" event={"ID":"9441dbc7-716c-413e-b0ea-bf1ef05b1608","Type":"ContainerStarted","Data":"3d001f041bbb06da656a55f93962d3d752403a2625ad4050b7ede81d53fbf767"} Nov 25 11:09:08 crc kubenswrapper[4769]: I1125 11:09:08.746792 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-64569bb78d-pzqdq" Nov 25 11:09:08 crc kubenswrapper[4769]: I1125 11:09:08.748521 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 25 11:09:08 crc kubenswrapper[4769]: I1125 11:09:08.748637 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 11:09:08 crc kubenswrapper[4769]: I1125 11:09:08.748660 4769 scope.go:117] "RemoveContainer" containerID="08c10fa3fd260fed5e22367d730a2ad2c2029b34578d012f2e114280cb9bd700" Nov 25 11:09:08 crc kubenswrapper[4769]: I1125 11:09:08.755029 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-rgn2z" event={"ID":"6d9be953-34ea-4956-96bf-84d5f8babb2d","Type":"ContainerStarted","Data":"d235fd4ff16f9b3793110c21760071f02d363f77c4cc3a6939d6bfc2edacf6fc"} Nov 25 11:09:08 crc kubenswrapper[4769]: I1125 11:09:08.756189 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-rgn2z" Nov 25 11:09:09 crc kubenswrapper[4769]: I1125 11:09:09.237513 4769 scope.go:117] "RemoveContainer" containerID="836ee524fe136e22b1a4cdee89d7bdc2bac71512f4048a2178fe46c04b9cb5b5" Nov 25 11:09:09 crc kubenswrapper[4769]: I1125 11:09:09.237645 4769 scope.go:117] "RemoveContainer" containerID="7a45361e992b307f0107e532aa2bf871bc434d1574bff681b3b71afac7d7e53a" Nov 25 11:09:09 crc kubenswrapper[4769]: E1125 11:09:09.238072 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=barbican-operator-controller-manager-86dc4d89c8-zv5xr_openstack-operators(b90cc789-8211-48bc-85cc-1a31ad1af486)\"" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-zv5xr" podUID="b90cc789-8211-48bc-85cc-1a31ad1af486" Nov 25 11:09:09 crc kubenswrapper[4769]: I1125 11:09:09.770499 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-fknhz" event={"ID":"9d0ef7c9-7421-4fe2-b1c8-551253bea174","Type":"ContainerStarted","Data":"738e7aaf749e7bb878073b7537da577f5d68f71e883dce64a41113c3fb9a1cca"} Nov 25 11:09:09 crc kubenswrapper[4769]: I1125 11:09:09.771202 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-fknhz" Nov 25 11:09:10 crc kubenswrapper[4769]: 
I1125 11:09:10.243835 4769 scope.go:117] "RemoveContainer" containerID="ade37617c9c8ca458df14d6683e467d37c8e17a1a4f94a5b4006aa7618e78c7b" Nov 25 11:09:10 crc kubenswrapper[4769]: I1125 11:09:10.244706 4769 scope.go:117] "RemoveContainer" containerID="bdca6b56630d0dc81232cbaf70ba4fb1dda827a4b73883e5cce0ce5cb4a5a6e4" Nov 25 11:09:10 crc kubenswrapper[4769]: I1125 11:09:10.805867 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-68bvf" event={"ID":"a2f1ad69-27e4-4131-a742-a8d2c5df8636","Type":"ContainerStarted","Data":"9dd5b7f6f98b89a59e386c572b4b17a758d4f48997de188bcf1c0de36b3c7254"} Nov 25 11:09:10 crc kubenswrapper[4769]: I1125 11:09:10.807277 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-68bvf" Nov 25 11:09:10 crc kubenswrapper[4769]: I1125 11:09:10.811621 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-qdjsd" event={"ID":"63e66921-47a7-407a-b50e-06cf5cadb8be","Type":"ContainerStarted","Data":"3d503f7ba259e0eac7412d58e1c1f849ae5b3c5fd4914f3903b32daa5ad539a9"} Nov 25 11:09:11 crc kubenswrapper[4769]: I1125 11:09:11.237744 4769 scope.go:117] "RemoveContainer" containerID="c533a1589704ed52ed006ecd3bc686cb379289081ed185a1304a7c12882672b5" Nov 25 11:09:11 crc kubenswrapper[4769]: I1125 11:09:11.237880 4769 scope.go:117] "RemoveContainer" containerID="3ec051fc18a0ed5dd205e14155e6b88e962ed8ef7f45b098b1083440ef362933" Nov 25 11:09:11 crc kubenswrapper[4769]: I1125 11:09:11.238867 4769 scope.go:117] "RemoveContainer" containerID="08e2200c2a0a7ac276416be303052b5384f3e7af2792ff6e63217f7e655f64e3" Nov 25 11:09:11 crc kubenswrapper[4769]: I1125 11:09:11.823760 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-864885998-rz7fl" event={"ID":"8e7436d0-2ff7-4a11-9ab8-74a91e56de4a","Type":"ContainerStarted","Data":"c8490f5622162569335f7c2b350b0f77a94ba06fb79568c7862af1c88e5efda6"} Nov 25 11:09:11 crc kubenswrapper[4769]: I1125 11:09:11.825726 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-864885998-rz7fl" Nov 25 11:09:11 crc kubenswrapper[4769]: I1125 11:09:11.829280 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rcv4n" event={"ID":"1b8cd25d-43dd-4774-b1d9-59572bb6bef7","Type":"ContainerStarted","Data":"f9135450bbc27f2d76cfcad26c7d289559d8617ba5ad6dce51f268f61407749d"} Nov 25 11:09:11 crc kubenswrapper[4769]: I1125 11:09:11.829674 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rcv4n" Nov 25 11:09:11 crc kubenswrapper[4769]: I1125 11:09:11.833268 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-hh9nc" event={"ID":"1f5735dc-67e5-423c-9a8f-d42977c892d3","Type":"ContainerStarted","Data":"2589065ae41e4d527288cfdeac4742d07ef1590d34e61eab5b65921aa4c6fc7e"} Nov 25 11:09:11 crc kubenswrapper[4769]: I1125 11:09:11.833878 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-hh9nc" Nov 25 11:09:12 crc kubenswrapper[4769]: I1125 11:09:12.243957 4769 scope.go:117] "RemoveContainer" 
containerID="4819bf42ef924a4f4c1452e05c4935251a8a3a0b3104d56ba98796f9cabc7256" Nov 25 11:09:12 crc kubenswrapper[4769]: I1125 11:09:12.849886 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-nktn6" event={"ID":"136f8f90-5673-4a08-ab4b-c030c1c428a6","Type":"ContainerStarted","Data":"289b1142937b37ee0b9066a5bc22886dc79fe89574b53af3d49a3409a2d96a4b"} Nov 25 11:09:12 crc kubenswrapper[4769]: I1125 11:09:12.851246 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-nktn6" Nov 25 11:09:13 crc kubenswrapper[4769]: I1125 11:09:13.236721 4769 scope.go:117] "RemoveContainer" containerID="f085586e097d99e6a0130457fdecb9ff7a0569323a0150618c6a97dc4980df25" Nov 25 11:09:13 crc kubenswrapper[4769]: I1125 11:09:13.862098 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-bzrbr" event={"ID":"a032414e-4be2-47f7-ac88-3bdec0ccb151","Type":"ContainerStarted","Data":"73a0ed0d55528053b37e9786d28ffc06d48d76984f85db1eb475f94bd37dd5bf"} Nov 25 11:09:15 crc kubenswrapper[4769]: I1125 11:09:15.193543 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-774b86978c-w8ftq" Nov 25 11:09:15 crc kubenswrapper[4769]: I1125 11:09:15.556791 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-qdjsd" Nov 25 11:09:15 crc kubenswrapper[4769]: I1125 11:09:15.561012 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-qdjsd" Nov 25 11:09:15 crc kubenswrapper[4769]: I1125 11:09:15.608586 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-fknhz" Nov 25 11:09:15 crc kubenswrapper[4769]: I1125 11:09:15.696338 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-rgn2z" Nov 25 11:09:15 crc kubenswrapper[4769]: I1125 11:09:15.702545 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 25 11:09:15 crc kubenswrapper[4769]: I1125 11:09:15.871534 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-68bvf" Nov 25 11:09:15 crc kubenswrapper[4769]: I1125 11:09:15.925539 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-g8xqz" Nov 25 11:09:16 crc kubenswrapper[4769]: I1125 11:09:16.345494 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-864885998-rz7fl" Nov 25 11:09:17 crc kubenswrapper[4769]: I1125 11:09:17.236838 4769 scope.go:117] "RemoveContainer" containerID="c94dc750ec46009b876bdffd1bbff8dd13523ac00762c75027e94e19fa540798" Nov 25 11:09:17 crc kubenswrapper[4769]: I1125 11:09:17.236946 4769 scope.go:117] "RemoveContainer" containerID="f0c1f3186b2503e0a083f68329051df79ef8faacfbe24d4f3ffc198ebb2b5c18" Nov 25 11:09:17 crc kubenswrapper[4769]: I1125 11:09:17.925164 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-2hp9n" event={"ID":"aaa65e3e-75e2-4f50-b9d6-aa9710a6e394","Type":"ContainerStarted","Data":"bee006486e0bb194dcee2be48d694f4a8302f583b120db9114aa3ff61152cc6f"} Nov 25 11:09:17 crc kubenswrapper[4769]: I1125 11:09:17.925835 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-2hp9n" Nov 25 11:09:17 crc kubenswrapper[4769]: I1125 11:09:17.933598 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-k2htr" event={"ID":"4894deb0-65ca-4b42-b397-4092a75739c9","Type":"ContainerStarted","Data":"ffb835164bb0d3aa3afdf0d619abbedf7a67f7a2824d89efe2fc9b0ce4062032"} Nov 25 11:09:17 crc kubenswrapper[4769]: I1125 11:09:17.933928 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-k2htr" Nov 25 11:09:18 crc kubenswrapper[4769]: I1125 11:09:18.743938 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-9v54j"] Nov 25 11:09:18 crc kubenswrapper[4769]: E1125 11:09:18.744854 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 25 11:09:18 crc kubenswrapper[4769]: I1125 11:09:18.744877 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 25 11:09:18 crc kubenswrapper[4769]: E1125 11:09:18.744921 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7264b0b4-0f02-4841-9b8a-5f247b3c42f1" containerName="installer" Nov 25 11:09:18 crc kubenswrapper[4769]: I1125 11:09:18.744930 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="7264b0b4-0f02-4841-9b8a-5f247b3c42f1" containerName="installer" Nov 25 11:09:18 crc kubenswrapper[4769]: I1125 11:09:18.745327 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 25 11:09:18 crc kubenswrapper[4769]: I1125 11:09:18.745376 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="7264b0b4-0f02-4841-9b8a-5f247b3c42f1" containerName="installer" Nov 25 11:09:18 crc kubenswrapper[4769]: I1125 11:09:18.747720 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-9v54j" Nov 25 11:09:18 crc kubenswrapper[4769]: I1125 11:09:18.768499 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9v54j"] Nov 25 11:09:18 crc kubenswrapper[4769]: I1125 11:09:18.821554 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bdd2d26d-cc13-41dd-9e57-fd8e81c627fe-catalog-content\") pod \"redhat-operators-9v54j\" (UID: \"bdd2d26d-cc13-41dd-9e57-fd8e81c627fe\") " pod="openshift-marketplace/redhat-operators-9v54j" Nov 25 11:09:18 crc kubenswrapper[4769]: I1125 11:09:18.821651 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bdd2d26d-cc13-41dd-9e57-fd8e81c627fe-utilities\") pod \"redhat-operators-9v54j\" (UID: \"bdd2d26d-cc13-41dd-9e57-fd8e81c627fe\") " pod="openshift-marketplace/redhat-operators-9v54j" Nov 25 11:09:18 crc kubenswrapper[4769]: I1125 11:09:18.821727 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rbbql\" (UniqueName: \"kubernetes.io/projected/bdd2d26d-cc13-41dd-9e57-fd8e81c627fe-kube-api-access-rbbql\") pod \"redhat-operators-9v54j\" (UID: \"bdd2d26d-cc13-41dd-9e57-fd8e81c627fe\") " pod="openshift-marketplace/redhat-operators-9v54j" Nov 25 11:09:18 crc kubenswrapper[4769]: I1125 11:09:18.924262 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rbbql\" (UniqueName: \"kubernetes.io/projected/bdd2d26d-cc13-41dd-9e57-fd8e81c627fe-kube-api-access-rbbql\") pod \"redhat-operators-9v54j\" (UID: \"bdd2d26d-cc13-41dd-9e57-fd8e81c627fe\") " pod="openshift-marketplace/redhat-operators-9v54j" Nov 25 11:09:18 crc kubenswrapper[4769]: I1125 11:09:18.925018 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bdd2d26d-cc13-41dd-9e57-fd8e81c627fe-catalog-content\") pod \"redhat-operators-9v54j\" (UID: \"bdd2d26d-cc13-41dd-9e57-fd8e81c627fe\") " pod="openshift-marketplace/redhat-operators-9v54j" Nov 25 11:09:18 crc kubenswrapper[4769]: I1125 11:09:18.925252 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bdd2d26d-cc13-41dd-9e57-fd8e81c627fe-utilities\") pod \"redhat-operators-9v54j\" (UID: \"bdd2d26d-cc13-41dd-9e57-fd8e81c627fe\") " pod="openshift-marketplace/redhat-operators-9v54j" Nov 25 11:09:18 crc kubenswrapper[4769]: I1125 11:09:18.925570 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bdd2d26d-cc13-41dd-9e57-fd8e81c627fe-catalog-content\") pod \"redhat-operators-9v54j\" (UID: \"bdd2d26d-cc13-41dd-9e57-fd8e81c627fe\") " pod="openshift-marketplace/redhat-operators-9v54j" Nov 25 11:09:18 crc kubenswrapper[4769]: I1125 11:09:18.925651 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bdd2d26d-cc13-41dd-9e57-fd8e81c627fe-utilities\") pod \"redhat-operators-9v54j\" (UID: \"bdd2d26d-cc13-41dd-9e57-fd8e81c627fe\") " pod="openshift-marketplace/redhat-operators-9v54j" Nov 25 11:09:18 crc kubenswrapper[4769]: I1125 11:09:18.948855 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-rbbql\" (UniqueName: \"kubernetes.io/projected/bdd2d26d-cc13-41dd-9e57-fd8e81c627fe-kube-api-access-rbbql\") pod \"redhat-operators-9v54j\" (UID: \"bdd2d26d-cc13-41dd-9e57-fd8e81c627fe\") " pod="openshift-marketplace/redhat-operators-9v54j" Nov 25 11:09:19 crc kubenswrapper[4769]: I1125 11:09:19.101513 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9v54j" Nov 25 11:09:19 crc kubenswrapper[4769]: I1125 11:09:19.245535 4769 scope.go:117] "RemoveContainer" containerID="d829f0c4de57efa05c5de61329fc2861dac990c010137964537bb483631bfe49" Nov 25 11:09:19 crc kubenswrapper[4769]: I1125 11:09:19.246415 4769 scope.go:117] "RemoveContainer" containerID="c3296068f0f3100d34066d1fa60c281000f795748c9934e078d92db18c9d1246" Nov 25 11:09:19 crc kubenswrapper[4769]: I1125 11:09:19.633676 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-gkx9w"] Nov 25 11:09:19 crc kubenswrapper[4769]: I1125 11:09:19.637396 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gkx9w" Nov 25 11:09:19 crc kubenswrapper[4769]: I1125 11:09:19.652547 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gkx9w"] Nov 25 11:09:19 crc kubenswrapper[4769]: I1125 11:09:19.754706 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lw9wf\" (UniqueName: \"kubernetes.io/projected/f1b1fb95-30bd-457e-b59c-c06e6b770f42-kube-api-access-lw9wf\") pod \"redhat-operators-gkx9w\" (UID: \"f1b1fb95-30bd-457e-b59c-c06e6b770f42\") " pod="openshift-marketplace/redhat-operators-gkx9w" Nov 25 11:09:19 crc kubenswrapper[4769]: I1125 11:09:19.759285 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f1b1fb95-30bd-457e-b59c-c06e6b770f42-utilities\") pod \"redhat-operators-gkx9w\" (UID: \"f1b1fb95-30bd-457e-b59c-c06e6b770f42\") " pod="openshift-marketplace/redhat-operators-gkx9w" Nov 25 11:09:19 crc kubenswrapper[4769]: I1125 11:09:19.767940 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f1b1fb95-30bd-457e-b59c-c06e6b770f42-catalog-content\") pod \"redhat-operators-gkx9w\" (UID: \"f1b1fb95-30bd-457e-b59c-c06e6b770f42\") " pod="openshift-marketplace/redhat-operators-gkx9w" Nov 25 11:09:19 crc kubenswrapper[4769]: I1125 11:09:19.770737 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9v54j"] Nov 25 11:09:19 crc kubenswrapper[4769]: I1125 11:09:19.870796 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f1b1fb95-30bd-457e-b59c-c06e6b770f42-catalog-content\") pod \"redhat-operators-gkx9w\" (UID: \"f1b1fb95-30bd-457e-b59c-c06e6b770f42\") " pod="openshift-marketplace/redhat-operators-gkx9w" Nov 25 11:09:19 crc kubenswrapper[4769]: I1125 11:09:19.871054 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lw9wf\" (UniqueName: \"kubernetes.io/projected/f1b1fb95-30bd-457e-b59c-c06e6b770f42-kube-api-access-lw9wf\") pod \"redhat-operators-gkx9w\" (UID: \"f1b1fb95-30bd-457e-b59c-c06e6b770f42\") " pod="openshift-marketplace/redhat-operators-gkx9w" Nov 25 11:09:19 crc 
kubenswrapper[4769]: I1125 11:09:19.871091 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f1b1fb95-30bd-457e-b59c-c06e6b770f42-utilities\") pod \"redhat-operators-gkx9w\" (UID: \"f1b1fb95-30bd-457e-b59c-c06e6b770f42\") " pod="openshift-marketplace/redhat-operators-gkx9w" Nov 25 11:09:19 crc kubenswrapper[4769]: I1125 11:09:19.871436 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f1b1fb95-30bd-457e-b59c-c06e6b770f42-catalog-content\") pod \"redhat-operators-gkx9w\" (UID: \"f1b1fb95-30bd-457e-b59c-c06e6b770f42\") " pod="openshift-marketplace/redhat-operators-gkx9w" Nov 25 11:09:19 crc kubenswrapper[4769]: I1125 11:09:19.871638 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f1b1fb95-30bd-457e-b59c-c06e6b770f42-utilities\") pod \"redhat-operators-gkx9w\" (UID: \"f1b1fb95-30bd-457e-b59c-c06e6b770f42\") " pod="openshift-marketplace/redhat-operators-gkx9w" Nov 25 11:09:19 crc kubenswrapper[4769]: I1125 11:09:19.891691 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lw9wf\" (UniqueName: \"kubernetes.io/projected/f1b1fb95-30bd-457e-b59c-c06e6b770f42-kube-api-access-lw9wf\") pod \"redhat-operators-gkx9w\" (UID: \"f1b1fb95-30bd-457e-b59c-c06e6b770f42\") " pod="openshift-marketplace/redhat-operators-gkx9w" Nov 25 11:09:19 crc kubenswrapper[4769]: I1125 11:09:19.963424 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gkx9w" Nov 25 11:09:20 crc kubenswrapper[4769]: I1125 11:09:20.004434 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-47gs9" event={"ID":"b59cac8b-fb36-4316-ab83-da7202b67af5","Type":"ContainerStarted","Data":"b43be780a2f5ad49abf5b30cb23c02efc103a336b71fe6a9637cb89d5d1dd584"} Nov 25 11:09:20 crc kubenswrapper[4769]: I1125 11:09:20.007076 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-47gs9" Nov 25 11:09:20 crc kubenswrapper[4769]: I1125 11:09:20.063272 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-m2qxd" event={"ID":"1da948d8-e834-488a-a3ec-a0c0229ebaf5","Type":"ContainerStarted","Data":"e0474bca6d2a1221ced2e7208fdda732dbfb60e3d1f8af45b962c8210ac7600b"} Nov 25 11:09:20 crc kubenswrapper[4769]: I1125 11:09:20.065158 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-m2qxd" Nov 25 11:09:20 crc kubenswrapper[4769]: I1125 11:09:20.086192 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9v54j" event={"ID":"bdd2d26d-cc13-41dd-9e57-fd8e81c627fe","Type":"ContainerStarted","Data":"1205df62446f302424f6769a0877b5e5e1601534721e04f29f330d98d3938947"} Nov 25 11:09:20 crc kubenswrapper[4769]: I1125 11:09:20.581486 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gkx9w"] Nov 25 11:09:20 crc kubenswrapper[4769]: I1125 11:09:20.821694 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-856hd"] Nov 25 11:09:20 crc kubenswrapper[4769]: I1125 11:09:20.824806 4769 util.go:30] 
"No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-856hd" Nov 25 11:09:20 crc kubenswrapper[4769]: I1125 11:09:20.837400 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-856hd"] Nov 25 11:09:20 crc kubenswrapper[4769]: I1125 11:09:20.904009 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a6225f9-cdc5-486a-b813-db81a752fade-utilities\") pod \"redhat-operators-856hd\" (UID: \"4a6225f9-cdc5-486a-b813-db81a752fade\") " pod="openshift-marketplace/redhat-operators-856hd" Nov 25 11:09:20 crc kubenswrapper[4769]: I1125 11:09:20.904094 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q79c4\" (UniqueName: \"kubernetes.io/projected/4a6225f9-cdc5-486a-b813-db81a752fade-kube-api-access-q79c4\") pod \"redhat-operators-856hd\" (UID: \"4a6225f9-cdc5-486a-b813-db81a752fade\") " pod="openshift-marketplace/redhat-operators-856hd" Nov 25 11:09:20 crc kubenswrapper[4769]: I1125 11:09:20.904208 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a6225f9-cdc5-486a-b813-db81a752fade-catalog-content\") pod \"redhat-operators-856hd\" (UID: \"4a6225f9-cdc5-486a-b813-db81a752fade\") " pod="openshift-marketplace/redhat-operators-856hd" Nov 25 11:09:21 crc kubenswrapper[4769]: I1125 11:09:21.006859 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a6225f9-cdc5-486a-b813-db81a752fade-utilities\") pod \"redhat-operators-856hd\" (UID: \"4a6225f9-cdc5-486a-b813-db81a752fade\") " pod="openshift-marketplace/redhat-operators-856hd" Nov 25 11:09:21 crc kubenswrapper[4769]: I1125 11:09:21.006926 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q79c4\" (UniqueName: \"kubernetes.io/projected/4a6225f9-cdc5-486a-b813-db81a752fade-kube-api-access-q79c4\") pod \"redhat-operators-856hd\" (UID: \"4a6225f9-cdc5-486a-b813-db81a752fade\") " pod="openshift-marketplace/redhat-operators-856hd" Nov 25 11:09:21 crc kubenswrapper[4769]: I1125 11:09:21.007047 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a6225f9-cdc5-486a-b813-db81a752fade-catalog-content\") pod \"redhat-operators-856hd\" (UID: \"4a6225f9-cdc5-486a-b813-db81a752fade\") " pod="openshift-marketplace/redhat-operators-856hd" Nov 25 11:09:21 crc kubenswrapper[4769]: I1125 11:09:21.007476 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a6225f9-cdc5-486a-b813-db81a752fade-utilities\") pod \"redhat-operators-856hd\" (UID: \"4a6225f9-cdc5-486a-b813-db81a752fade\") " pod="openshift-marketplace/redhat-operators-856hd" Nov 25 11:09:21 crc kubenswrapper[4769]: I1125 11:09:21.007513 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a6225f9-cdc5-486a-b813-db81a752fade-catalog-content\") pod \"redhat-operators-856hd\" (UID: \"4a6225f9-cdc5-486a-b813-db81a752fade\") " pod="openshift-marketplace/redhat-operators-856hd" Nov 25 11:09:21 crc kubenswrapper[4769]: I1125 11:09:21.032996 4769 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-q79c4\" (UniqueName: \"kubernetes.io/projected/4a6225f9-cdc5-486a-b813-db81a752fade-kube-api-access-q79c4\") pod \"redhat-operators-856hd\" (UID: \"4a6225f9-cdc5-486a-b813-db81a752fade\") " pod="openshift-marketplace/redhat-operators-856hd" Nov 25 11:09:21 crc kubenswrapper[4769]: I1125 11:09:21.102093 4769 generic.go:334] "Generic (PLEG): container finished" podID="f1b1fb95-30bd-457e-b59c-c06e6b770f42" containerID="1c744ee3bfbaaa52bb9931f19c5a7da7ce606ce6963f8f5453d6d0c393f93146" exitCode=0 Nov 25 11:09:21 crc kubenswrapper[4769]: I1125 11:09:21.102145 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gkx9w" event={"ID":"f1b1fb95-30bd-457e-b59c-c06e6b770f42","Type":"ContainerDied","Data":"1c744ee3bfbaaa52bb9931f19c5a7da7ce606ce6963f8f5453d6d0c393f93146"} Nov 25 11:09:21 crc kubenswrapper[4769]: I1125 11:09:21.102191 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gkx9w" event={"ID":"f1b1fb95-30bd-457e-b59c-c06e6b770f42","Type":"ContainerStarted","Data":"296ceb0fb976757174aefb38081c1f4e2cb575e1e88ee597c305471bc06acd1b"} Nov 25 11:09:21 crc kubenswrapper[4769]: I1125 11:09:21.105242 4769 generic.go:334] "Generic (PLEG): container finished" podID="bdd2d26d-cc13-41dd-9e57-fd8e81c627fe" containerID="29250a081b2e2ceb2220a152dffa21d16bcb7ae8f7b53d1cc048a0ba38317e17" exitCode=0 Nov 25 11:09:21 crc kubenswrapper[4769]: I1125 11:09:21.105305 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9v54j" event={"ID":"bdd2d26d-cc13-41dd-9e57-fd8e81c627fe","Type":"ContainerDied","Data":"29250a081b2e2ceb2220a152dffa21d16bcb7ae8f7b53d1cc048a0ba38317e17"} Nov 25 11:09:21 crc kubenswrapper[4769]: I1125 11:09:21.183520 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-856hd" Nov 25 11:09:21 crc kubenswrapper[4769]: I1125 11:09:21.237029 4769 scope.go:117] "RemoveContainer" containerID="dc3cdaff4c0ec1b8b195b88e8e24abf9547700ecdec4a6c0a714c1e76cbe744c" Nov 25 11:09:21 crc kubenswrapper[4769]: I1125 11:09:21.237068 4769 scope.go:117] "RemoveContainer" containerID="7a45361e992b307f0107e532aa2bf871bc434d1574bff681b3b71afac7d7e53a" Nov 25 11:09:21 crc kubenswrapper[4769]: W1125 11:09:21.854919 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4a6225f9_cdc5_486a_b813_db81a752fade.slice/crio-a2daff87d522bc692ed1663f2f67614d72e59b59571615f73ee110f46c5f9a01 WatchSource:0}: Error finding container a2daff87d522bc692ed1663f2f67614d72e59b59571615f73ee110f46c5f9a01: Status 404 returned error can't find the container with id a2daff87d522bc692ed1663f2f67614d72e59b59571615f73ee110f46c5f9a01 Nov 25 11:09:21 crc kubenswrapper[4769]: I1125 11:09:21.859522 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-856hd"] Nov 25 11:09:22 crc kubenswrapper[4769]: I1125 11:09:22.025562 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-vr22m"] Nov 25 11:09:22 crc kubenswrapper[4769]: I1125 11:09:22.029114 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-vr22m" Nov 25 11:09:22 crc kubenswrapper[4769]: I1125 11:09:22.044147 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vr22m"] Nov 25 11:09:22 crc kubenswrapper[4769]: I1125 11:09:22.133327 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-856hd" event={"ID":"4a6225f9-cdc5-486a-b813-db81a752fade","Type":"ContainerStarted","Data":"a2daff87d522bc692ed1663f2f67614d72e59b59571615f73ee110f46c5f9a01"} Nov 25 11:09:22 crc kubenswrapper[4769]: I1125 11:09:22.139715 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-kbhzd" event={"ID":"88cb8ad7-c855-45eb-a471-aacb8c42082c","Type":"ContainerStarted","Data":"162ce6ec7376a908bd1babaed65c77b5a2dc4a051d264f5a1704afed0b8c4923"} Nov 25 11:09:22 crc kubenswrapper[4769]: I1125 11:09:22.141107 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-kbhzd" Nov 25 11:09:22 crc kubenswrapper[4769]: I1125 11:09:22.144477 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-zv5xr" event={"ID":"b90cc789-8211-48bc-85cc-1a31ad1af486","Type":"ContainerStarted","Data":"83ec6237ec47ff2ecc0eb7b2f24702fe13103781af336097568a8177c0b01f42"} Nov 25 11:09:22 crc kubenswrapper[4769]: I1125 11:09:22.144764 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-zv5xr" Nov 25 11:09:22 crc kubenswrapper[4769]: I1125 11:09:22.145511 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e9d04b78-f610-43ce-bc66-ca181acf6654-utilities\") pod \"redhat-operators-vr22m\" (UID: \"e9d04b78-f610-43ce-bc66-ca181acf6654\") " pod="openshift-marketplace/redhat-operators-vr22m" Nov 25 11:09:22 crc kubenswrapper[4769]: I1125 11:09:22.145759 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rz8zv\" (UniqueName: \"kubernetes.io/projected/e9d04b78-f610-43ce-bc66-ca181acf6654-kube-api-access-rz8zv\") pod \"redhat-operators-vr22m\" (UID: \"e9d04b78-f610-43ce-bc66-ca181acf6654\") " pod="openshift-marketplace/redhat-operators-vr22m" Nov 25 11:09:22 crc kubenswrapper[4769]: I1125 11:09:22.146063 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e9d04b78-f610-43ce-bc66-ca181acf6654-catalog-content\") pod \"redhat-operators-vr22m\" (UID: \"e9d04b78-f610-43ce-bc66-ca181acf6654\") " pod="openshift-marketplace/redhat-operators-vr22m" Nov 25 11:09:22 crc kubenswrapper[4769]: I1125 11:09:22.248631 4769 scope.go:117] "RemoveContainer" containerID="a9f56107a7921df8ef77f73698268fffe3e95324cdf2a5e0399a223d81211db8" Nov 25 11:09:22 crc kubenswrapper[4769]: I1125 11:09:22.253700 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e9d04b78-f610-43ce-bc66-ca181acf6654-catalog-content\") pod \"redhat-operators-vr22m\" (UID: \"e9d04b78-f610-43ce-bc66-ca181acf6654\") " pod="openshift-marketplace/redhat-operators-vr22m" Nov 25 11:09:22 crc kubenswrapper[4769]: I1125 11:09:22.253944 4769 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e9d04b78-f610-43ce-bc66-ca181acf6654-utilities\") pod \"redhat-operators-vr22m\" (UID: \"e9d04b78-f610-43ce-bc66-ca181acf6654\") " pod="openshift-marketplace/redhat-operators-vr22m" Nov 25 11:09:22 crc kubenswrapper[4769]: I1125 11:09:22.254118 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rz8zv\" (UniqueName: \"kubernetes.io/projected/e9d04b78-f610-43ce-bc66-ca181acf6654-kube-api-access-rz8zv\") pod \"redhat-operators-vr22m\" (UID: \"e9d04b78-f610-43ce-bc66-ca181acf6654\") " pod="openshift-marketplace/redhat-operators-vr22m" Nov 25 11:09:22 crc kubenswrapper[4769]: I1125 11:09:22.254377 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e9d04b78-f610-43ce-bc66-ca181acf6654-catalog-content\") pod \"redhat-operators-vr22m\" (UID: \"e9d04b78-f610-43ce-bc66-ca181acf6654\") " pod="openshift-marketplace/redhat-operators-vr22m" Nov 25 11:09:22 crc kubenswrapper[4769]: I1125 11:09:22.255073 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e9d04b78-f610-43ce-bc66-ca181acf6654-utilities\") pod \"redhat-operators-vr22m\" (UID: \"e9d04b78-f610-43ce-bc66-ca181acf6654\") " pod="openshift-marketplace/redhat-operators-vr22m" Nov 25 11:09:22 crc kubenswrapper[4769]: I1125 11:09:22.279116 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rz8zv\" (UniqueName: \"kubernetes.io/projected/e9d04b78-f610-43ce-bc66-ca181acf6654-kube-api-access-rz8zv\") pod \"redhat-operators-vr22m\" (UID: \"e9d04b78-f610-43ce-bc66-ca181acf6654\") " pod="openshift-marketplace/redhat-operators-vr22m" Nov 25 11:09:22 crc kubenswrapper[4769]: I1125 11:09:22.294426 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 11:09:22 crc kubenswrapper[4769]: I1125 11:09:22.294483 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 11:09:22 crc kubenswrapper[4769]: I1125 11:09:22.294526 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" Nov 25 11:09:22 crc kubenswrapper[4769]: I1125 11:09:22.295806 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4b939fc00d006c5c582da7cfcaed5ec1363b4453901fa7b7866cf6b4ff5b14f5"} pod="openshift-machine-config-operator/machine-config-daemon-98mzt" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 11:09:22 crc kubenswrapper[4769]: I1125 11:09:22.295861 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" 
containerID="cri-o://4b939fc00d006c5c582da7cfcaed5ec1363b4453901fa7b7866cf6b4ff5b14f5" gracePeriod=600 Nov 25 11:09:22 crc kubenswrapper[4769]: I1125 11:09:22.434251 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vr22m" Nov 25 11:09:22 crc kubenswrapper[4769]: E1125 11:09:22.437167 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 11:09:22 crc kubenswrapper[4769]: I1125 11:09:22.964675 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vr22m"] Nov 25 11:09:23 crc kubenswrapper[4769]: I1125 11:09:23.158373 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gkx9w" event={"ID":"f1b1fb95-30bd-457e-b59c-c06e6b770f42","Type":"ContainerStarted","Data":"2846aa87a8fb70c18047658bbab361766ef69a9d93f19ad2dc263e2db41e2321"} Nov 25 11:09:23 crc kubenswrapper[4769]: I1125 11:09:23.161758 4769 generic.go:334] "Generic (PLEG): container finished" podID="4a6225f9-cdc5-486a-b813-db81a752fade" containerID="d19f0987c306e9ccf6983d6f68aa32b10622159f9e820112ac1eef4fdd935704" exitCode=0 Nov 25 11:09:23 crc kubenswrapper[4769]: I1125 11:09:23.161771 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-856hd" event={"ID":"4a6225f9-cdc5-486a-b813-db81a752fade","Type":"ContainerDied","Data":"d19f0987c306e9ccf6983d6f68aa32b10622159f9e820112ac1eef4fdd935704"} Nov 25 11:09:23 crc kubenswrapper[4769]: I1125 11:09:23.163745 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vr22m" event={"ID":"e9d04b78-f610-43ce-bc66-ca181acf6654","Type":"ContainerStarted","Data":"b5a5e869a98f1eca074fe92b7132c12104eed2851feeeeb5a63de9eac7d150a7"} Nov 25 11:09:23 crc kubenswrapper[4769]: I1125 11:09:23.166491 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9v54j" event={"ID":"bdd2d26d-cc13-41dd-9e57-fd8e81c627fe","Type":"ContainerStarted","Data":"d632444cd9b6b05bdddaf9e5883e0e245e2d64c8856e489fca8a481aa146d254"} Nov 25 11:09:23 crc kubenswrapper[4769]: I1125 11:09:23.170420 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7xcjk" event={"ID":"0f8a8be0-04e6-4ef8-b3ce-fb6ced595cbd","Type":"ContainerStarted","Data":"d816f9ecf535f9dc5cae578975f1ec0eeb938e514b10bffe403b1a62c48d33c4"} Nov 25 11:09:23 crc kubenswrapper[4769]: I1125 11:09:23.170614 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7xcjk" Nov 25 11:09:23 crc kubenswrapper[4769]: I1125 11:09:23.235954 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-h2wlq"] Nov 25 11:09:23 crc kubenswrapper[4769]: I1125 11:09:23.236469 4769 scope.go:117] "RemoveContainer" containerID="33ffee1db1abcb01e39cf2f6f963358af41206cf758ac6a6e42b89c43791d7d6" Nov 25 11:09:23 crc kubenswrapper[4769]: I1125 11:09:23.238885 4769 generic.go:334] "Generic (PLEG): container finished" 
podID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerID="4b939fc00d006c5c582da7cfcaed5ec1363b4453901fa7b7866cf6b4ff5b14f5" exitCode=0 Nov 25 11:09:23 crc kubenswrapper[4769]: I1125 11:09:23.239140 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" event={"ID":"d58c71b5-5dc4-45c1-9b58-9740a35d2256","Type":"ContainerDied","Data":"4b939fc00d006c5c582da7cfcaed5ec1363b4453901fa7b7866cf6b4ff5b14f5"} Nov 25 11:09:23 crc kubenswrapper[4769]: I1125 11:09:23.239177 4769 scope.go:117] "RemoveContainer" containerID="a9023e5a831ec6d39937fcd0f7843ba8dde118a0ee018068576d8f5bb54ffae1" Nov 25 11:09:23 crc kubenswrapper[4769]: I1125 11:09:23.239305 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-h2wlq" Nov 25 11:09:23 crc kubenswrapper[4769]: I1125 11:09:23.241209 4769 scope.go:117] "RemoveContainer" containerID="4b939fc00d006c5c582da7cfcaed5ec1363b4453901fa7b7866cf6b4ff5b14f5" Nov 25 11:09:23 crc kubenswrapper[4769]: E1125 11:09:23.241758 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 11:09:23 crc kubenswrapper[4769]: I1125 11:09:23.256843 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-h2wlq"] Nov 25 11:09:23 crc kubenswrapper[4769]: I1125 11:09:23.382670 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/993bc795-fde2-4876-9e76-e97a6095576d-utilities\") pod \"redhat-operators-h2wlq\" (UID: \"993bc795-fde2-4876-9e76-e97a6095576d\") " pod="openshift-marketplace/redhat-operators-h2wlq" Nov 25 11:09:23 crc kubenswrapper[4769]: I1125 11:09:23.383155 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/993bc795-fde2-4876-9e76-e97a6095576d-catalog-content\") pod \"redhat-operators-h2wlq\" (UID: \"993bc795-fde2-4876-9e76-e97a6095576d\") " pod="openshift-marketplace/redhat-operators-h2wlq" Nov 25 11:09:23 crc kubenswrapper[4769]: I1125 11:09:23.383212 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-85t7t\" (UniqueName: \"kubernetes.io/projected/993bc795-fde2-4876-9e76-e97a6095576d-kube-api-access-85t7t\") pod \"redhat-operators-h2wlq\" (UID: \"993bc795-fde2-4876-9e76-e97a6095576d\") " pod="openshift-marketplace/redhat-operators-h2wlq" Nov 25 11:09:23 crc kubenswrapper[4769]: I1125 11:09:23.484984 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/993bc795-fde2-4876-9e76-e97a6095576d-utilities\") pod \"redhat-operators-h2wlq\" (UID: \"993bc795-fde2-4876-9e76-e97a6095576d\") " pod="openshift-marketplace/redhat-operators-h2wlq" Nov 25 11:09:23 crc kubenswrapper[4769]: I1125 11:09:23.485090 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/993bc795-fde2-4876-9e76-e97a6095576d-catalog-content\") pod 
\"redhat-operators-h2wlq\" (UID: \"993bc795-fde2-4876-9e76-e97a6095576d\") " pod="openshift-marketplace/redhat-operators-h2wlq" Nov 25 11:09:23 crc kubenswrapper[4769]: I1125 11:09:23.485302 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-85t7t\" (UniqueName: \"kubernetes.io/projected/993bc795-fde2-4876-9e76-e97a6095576d-kube-api-access-85t7t\") pod \"redhat-operators-h2wlq\" (UID: \"993bc795-fde2-4876-9e76-e97a6095576d\") " pod="openshift-marketplace/redhat-operators-h2wlq" Nov 25 11:09:23 crc kubenswrapper[4769]: I1125 11:09:23.485705 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/993bc795-fde2-4876-9e76-e97a6095576d-utilities\") pod \"redhat-operators-h2wlq\" (UID: \"993bc795-fde2-4876-9e76-e97a6095576d\") " pod="openshift-marketplace/redhat-operators-h2wlq" Nov 25 11:09:23 crc kubenswrapper[4769]: I1125 11:09:23.487101 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/993bc795-fde2-4876-9e76-e97a6095576d-catalog-content\") pod \"redhat-operators-h2wlq\" (UID: \"993bc795-fde2-4876-9e76-e97a6095576d\") " pod="openshift-marketplace/redhat-operators-h2wlq" Nov 25 11:09:23 crc kubenswrapper[4769]: I1125 11:09:23.515739 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-85t7t\" (UniqueName: \"kubernetes.io/projected/993bc795-fde2-4876-9e76-e97a6095576d-kube-api-access-85t7t\") pod \"redhat-operators-h2wlq\" (UID: \"993bc795-fde2-4876-9e76-e97a6095576d\") " pod="openshift-marketplace/redhat-operators-h2wlq" Nov 25 11:09:23 crc kubenswrapper[4769]: I1125 11:09:23.574175 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-h2wlq" Nov 25 11:09:24 crc kubenswrapper[4769]: I1125 11:09:24.251992 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wssdn" event={"ID":"18e1910e-52b2-439b-a93f-4ffe63a7b992","Type":"ContainerStarted","Data":"1040ebd131e122ccd27f7c59849feb8875d07688ab290529908a09c42850ea08"} Nov 25 11:09:24 crc kubenswrapper[4769]: I1125 11:09:24.252607 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wssdn" Nov 25 11:09:24 crc kubenswrapper[4769]: I1125 11:09:24.254929 4769 generic.go:334] "Generic (PLEG): container finished" podID="e9d04b78-f610-43ce-bc66-ca181acf6654" containerID="96fd247d2f8cb17f2c17c5bdd0fdd8a11720d0e88d1f6adb71fcd0dde0eb9b99" exitCode=0 Nov 25 11:09:24 crc kubenswrapper[4769]: I1125 11:09:24.255037 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vr22m" event={"ID":"e9d04b78-f610-43ce-bc66-ca181acf6654","Type":"ContainerDied","Data":"96fd247d2f8cb17f2c17c5bdd0fdd8a11720d0e88d1f6adb71fcd0dde0eb9b99"} Nov 25 11:09:24 crc kubenswrapper[4769]: I1125 11:09:24.337299 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-h2wlq"] Nov 25 11:09:24 crc kubenswrapper[4769]: W1125 11:09:24.349910 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod993bc795_fde2_4876_9e76_e97a6095576d.slice/crio-bd331a544245279f04df5134a181d238a472359c28a0f098c0e6628a19905626 WatchSource:0}: Error finding container bd331a544245279f04df5134a181d238a472359c28a0f098c0e6628a19905626: Status 404 returned error can't find the container with id bd331a544245279f04df5134a181d238a472359c28a0f098c0e6628a19905626 Nov 25 11:09:24 crc kubenswrapper[4769]: I1125 11:09:24.834038 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-9hv66"] Nov 25 11:09:24 crc kubenswrapper[4769]: I1125 11:09:24.837073 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-9hv66" Nov 25 11:09:24 crc kubenswrapper[4769]: I1125 11:09:24.851124 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9hv66"] Nov 25 11:09:24 crc kubenswrapper[4769]: I1125 11:09:24.929462 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e334767-99a9-4a84-bcd7-e3e53d56750a-utilities\") pod \"redhat-operators-9hv66\" (UID: \"7e334767-99a9-4a84-bcd7-e3e53d56750a\") " pod="openshift-marketplace/redhat-operators-9hv66" Nov 25 11:09:24 crc kubenswrapper[4769]: I1125 11:09:24.929534 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e334767-99a9-4a84-bcd7-e3e53d56750a-catalog-content\") pod \"redhat-operators-9hv66\" (UID: \"7e334767-99a9-4a84-bcd7-e3e53d56750a\") " pod="openshift-marketplace/redhat-operators-9hv66" Nov 25 11:09:24 crc kubenswrapper[4769]: I1125 11:09:24.929896 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xs6l2\" (UniqueName: \"kubernetes.io/projected/7e334767-99a9-4a84-bcd7-e3e53d56750a-kube-api-access-xs6l2\") pod \"redhat-operators-9hv66\" (UID: \"7e334767-99a9-4a84-bcd7-e3e53d56750a\") " pod="openshift-marketplace/redhat-operators-9hv66" Nov 25 11:09:25 crc kubenswrapper[4769]: I1125 11:09:25.013173 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-2hp9n" Nov 25 11:09:25 crc kubenswrapper[4769]: I1125 11:09:25.032356 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e334767-99a9-4a84-bcd7-e3e53d56750a-utilities\") pod \"redhat-operators-9hv66\" (UID: \"7e334767-99a9-4a84-bcd7-e3e53d56750a\") " pod="openshift-marketplace/redhat-operators-9hv66" Nov 25 11:09:25 crc kubenswrapper[4769]: I1125 11:09:25.032430 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e334767-99a9-4a84-bcd7-e3e53d56750a-catalog-content\") pod \"redhat-operators-9hv66\" (UID: \"7e334767-99a9-4a84-bcd7-e3e53d56750a\") " pod="openshift-marketplace/redhat-operators-9hv66" Nov 25 11:09:25 crc kubenswrapper[4769]: I1125 11:09:25.032634 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xs6l2\" (UniqueName: \"kubernetes.io/projected/7e334767-99a9-4a84-bcd7-e3e53d56750a-kube-api-access-xs6l2\") pod \"redhat-operators-9hv66\" (UID: \"7e334767-99a9-4a84-bcd7-e3e53d56750a\") " pod="openshift-marketplace/redhat-operators-9hv66" Nov 25 11:09:25 crc kubenswrapper[4769]: I1125 11:09:25.032830 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e334767-99a9-4a84-bcd7-e3e53d56750a-utilities\") pod \"redhat-operators-9hv66\" (UID: \"7e334767-99a9-4a84-bcd7-e3e53d56750a\") " pod="openshift-marketplace/redhat-operators-9hv66" Nov 25 11:09:25 crc kubenswrapper[4769]: I1125 11:09:25.032992 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e334767-99a9-4a84-bcd7-e3e53d56750a-catalog-content\") pod \"redhat-operators-9hv66\" (UID: 
\"7e334767-99a9-4a84-bcd7-e3e53d56750a\") " pod="openshift-marketplace/redhat-operators-9hv66" Nov 25 11:09:25 crc kubenswrapper[4769]: I1125 11:09:25.076058 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xs6l2\" (UniqueName: \"kubernetes.io/projected/7e334767-99a9-4a84-bcd7-e3e53d56750a-kube-api-access-xs6l2\") pod \"redhat-operators-9hv66\" (UID: \"7e334767-99a9-4a84-bcd7-e3e53d56750a\") " pod="openshift-marketplace/redhat-operators-9hv66" Nov 25 11:09:25 crc kubenswrapper[4769]: I1125 11:09:25.152416 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-k2htr" Nov 25 11:09:25 crc kubenswrapper[4769]: I1125 11:09:25.172933 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9hv66" Nov 25 11:09:25 crc kubenswrapper[4769]: I1125 11:09:25.250560 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-m2qxd" Nov 25 11:09:25 crc kubenswrapper[4769]: I1125 11:09:25.307340 4769 generic.go:334] "Generic (PLEG): container finished" podID="993bc795-fde2-4876-9e76-e97a6095576d" containerID="86be9b2a0082a70d05cd99924bcb2f3f2f50320fc099212413129a320bbc6206" exitCode=0 Nov 25 11:09:25 crc kubenswrapper[4769]: I1125 11:09:25.307934 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h2wlq" event={"ID":"993bc795-fde2-4876-9e76-e97a6095576d","Type":"ContainerDied","Data":"86be9b2a0082a70d05cd99924bcb2f3f2f50320fc099212413129a320bbc6206"} Nov 25 11:09:25 crc kubenswrapper[4769]: I1125 11:09:25.307982 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h2wlq" event={"ID":"993bc795-fde2-4876-9e76-e97a6095576d","Type":"ContainerStarted","Data":"bd331a544245279f04df5134a181d238a472359c28a0f098c0e6628a19905626"} Nov 25 11:09:25 crc kubenswrapper[4769]: I1125 11:09:25.319029 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-856hd" event={"ID":"4a6225f9-cdc5-486a-b813-db81a752fade","Type":"ContainerStarted","Data":"752cb86da8fecd9cd08ae2eb2fb21f293e404c471b4243d3fc085b86ae4ad013"} Nov 25 11:09:25 crc kubenswrapper[4769]: I1125 11:09:25.477404 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-47gs9" Nov 25 11:09:25 crc kubenswrapper[4769]: I1125 11:09:25.527247 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-hh9nc" Nov 25 11:09:25 crc kubenswrapper[4769]: I1125 11:09:25.753147 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rcv4n" Nov 25 11:09:25 crc kubenswrapper[4769]: I1125 11:09:25.950350 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9hv66"] Nov 25 11:09:25 crc kubenswrapper[4769]: W1125 11:09:25.952109 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7e334767_99a9_4a84_bcd7_e3e53d56750a.slice/crio-6ab6771c75c22ab79c6906ff36a11a6d867af22a27e9ab2fce7235739ce00516 WatchSource:0}: Error finding container 
6ab6771c75c22ab79c6906ff36a11a6d867af22a27e9ab2fce7235739ce00516: Status 404 returned error can't find the container with id 6ab6771c75c22ab79c6906ff36a11a6d867af22a27e9ab2fce7235739ce00516 Nov 25 11:09:26 crc kubenswrapper[4769]: I1125 11:09:26.330101 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9hv66" event={"ID":"7e334767-99a9-4a84-bcd7-e3e53d56750a","Type":"ContainerStarted","Data":"6ab6771c75c22ab79c6906ff36a11a6d867af22a27e9ab2fce7235739ce00516"} Nov 25 11:09:26 crc kubenswrapper[4769]: I1125 11:09:26.823741 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-nktn6" Nov 25 11:09:27 crc kubenswrapper[4769]: I1125 11:09:27.343110 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9hv66" event={"ID":"7e334767-99a9-4a84-bcd7-e3e53d56750a","Type":"ContainerStarted","Data":"6a0f31a303cbac1952aaceb147ea5c206337f05102ed0c6483a942c0b3bd912a"} Nov 25 11:09:28 crc kubenswrapper[4769]: I1125 11:09:28.358724 4769 generic.go:334] "Generic (PLEG): container finished" podID="7e334767-99a9-4a84-bcd7-e3e53d56750a" containerID="6a0f31a303cbac1952aaceb147ea5c206337f05102ed0c6483a942c0b3bd912a" exitCode=0 Nov 25 11:09:28 crc kubenswrapper[4769]: I1125 11:09:28.359036 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9hv66" event={"ID":"7e334767-99a9-4a84-bcd7-e3e53d56750a","Type":"ContainerDied","Data":"6a0f31a303cbac1952aaceb147ea5c206337f05102ed0c6483a942c0b3bd912a"} Nov 25 11:09:29 crc kubenswrapper[4769]: I1125 11:09:29.404892 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-gwvnd"] Nov 25 11:09:29 crc kubenswrapper[4769]: I1125 11:09:29.408125 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-gwvnd" Nov 25 11:09:29 crc kubenswrapper[4769]: I1125 11:09:29.422367 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gwvnd"] Nov 25 11:09:29 crc kubenswrapper[4769]: I1125 11:09:29.465442 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r7gh8\" (UniqueName: \"kubernetes.io/projected/e588820f-3563-4b98-9ba4-85da1cee2821-kube-api-access-r7gh8\") pod \"redhat-operators-gwvnd\" (UID: \"e588820f-3563-4b98-9ba4-85da1cee2821\") " pod="openshift-marketplace/redhat-operators-gwvnd" Nov 25 11:09:29 crc kubenswrapper[4769]: I1125 11:09:29.465814 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e588820f-3563-4b98-9ba4-85da1cee2821-catalog-content\") pod \"redhat-operators-gwvnd\" (UID: \"e588820f-3563-4b98-9ba4-85da1cee2821\") " pod="openshift-marketplace/redhat-operators-gwvnd" Nov 25 11:09:29 crc kubenswrapper[4769]: I1125 11:09:29.465877 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e588820f-3563-4b98-9ba4-85da1cee2821-utilities\") pod \"redhat-operators-gwvnd\" (UID: \"e588820f-3563-4b98-9ba4-85da1cee2821\") " pod="openshift-marketplace/redhat-operators-gwvnd" Nov 25 11:09:29 crc kubenswrapper[4769]: I1125 11:09:29.567477 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e588820f-3563-4b98-9ba4-85da1cee2821-catalog-content\") pod \"redhat-operators-gwvnd\" (UID: \"e588820f-3563-4b98-9ba4-85da1cee2821\") " pod="openshift-marketplace/redhat-operators-gwvnd" Nov 25 11:09:29 crc kubenswrapper[4769]: I1125 11:09:29.567554 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e588820f-3563-4b98-9ba4-85da1cee2821-utilities\") pod \"redhat-operators-gwvnd\" (UID: \"e588820f-3563-4b98-9ba4-85da1cee2821\") " pod="openshift-marketplace/redhat-operators-gwvnd" Nov 25 11:09:29 crc kubenswrapper[4769]: I1125 11:09:29.567690 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r7gh8\" (UniqueName: \"kubernetes.io/projected/e588820f-3563-4b98-9ba4-85da1cee2821-kube-api-access-r7gh8\") pod \"redhat-operators-gwvnd\" (UID: \"e588820f-3563-4b98-9ba4-85da1cee2821\") " pod="openshift-marketplace/redhat-operators-gwvnd" Nov 25 11:09:29 crc kubenswrapper[4769]: I1125 11:09:29.568193 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e588820f-3563-4b98-9ba4-85da1cee2821-catalog-content\") pod \"redhat-operators-gwvnd\" (UID: \"e588820f-3563-4b98-9ba4-85da1cee2821\") " pod="openshift-marketplace/redhat-operators-gwvnd" Nov 25 11:09:29 crc kubenswrapper[4769]: I1125 11:09:29.568277 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e588820f-3563-4b98-9ba4-85da1cee2821-utilities\") pod \"redhat-operators-gwvnd\" (UID: \"e588820f-3563-4b98-9ba4-85da1cee2821\") " pod="openshift-marketplace/redhat-operators-gwvnd" Nov 25 11:09:29 crc kubenswrapper[4769]: I1125 11:09:29.621685 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-r7gh8\" (UniqueName: \"kubernetes.io/projected/e588820f-3563-4b98-9ba4-85da1cee2821-kube-api-access-r7gh8\") pod \"redhat-operators-gwvnd\" (UID: \"e588820f-3563-4b98-9ba4-85da1cee2821\") " pod="openshift-marketplace/redhat-operators-gwvnd" Nov 25 11:09:29 crc kubenswrapper[4769]: I1125 11:09:29.883074 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gwvnd" Nov 25 11:09:30 crc kubenswrapper[4769]: I1125 11:09:30.384149 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vr22m" event={"ID":"e9d04b78-f610-43ce-bc66-ca181acf6654","Type":"ContainerStarted","Data":"543eaf3d2bfcd44951d40cc61618b80d30d796e12a6f97d497bcc3f393499c9d"} Nov 25 11:09:30 crc kubenswrapper[4769]: I1125 11:09:30.386756 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h2wlq" event={"ID":"993bc795-fde2-4876-9e76-e97a6095576d","Type":"ContainerStarted","Data":"667cf0a9aad845523b83761ec7e6b90f49da63f32b87d16da5405b0dc2032a15"} Nov 25 11:09:30 crc kubenswrapper[4769]: I1125 11:09:30.434672 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-h57kt"] Nov 25 11:09:30 crc kubenswrapper[4769]: I1125 11:09:30.440257 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-h57kt" Nov 25 11:09:30 crc kubenswrapper[4769]: I1125 11:09:30.466550 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-h57kt"] Nov 25 11:09:30 crc kubenswrapper[4769]: I1125 11:09:30.589916 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af5c9899-c379-4d60-9aae-979a20de2ad2-utilities\") pod \"redhat-operators-h57kt\" (UID: \"af5c9899-c379-4d60-9aae-979a20de2ad2\") " pod="openshift-marketplace/redhat-operators-h57kt" Nov 25 11:09:30 crc kubenswrapper[4769]: I1125 11:09:30.590013 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z4x9j\" (UniqueName: \"kubernetes.io/projected/af5c9899-c379-4d60-9aae-979a20de2ad2-kube-api-access-z4x9j\") pod \"redhat-operators-h57kt\" (UID: \"af5c9899-c379-4d60-9aae-979a20de2ad2\") " pod="openshift-marketplace/redhat-operators-h57kt" Nov 25 11:09:30 crc kubenswrapper[4769]: I1125 11:09:30.590044 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af5c9899-c379-4d60-9aae-979a20de2ad2-catalog-content\") pod \"redhat-operators-h57kt\" (UID: \"af5c9899-c379-4d60-9aae-979a20de2ad2\") " pod="openshift-marketplace/redhat-operators-h57kt" Nov 25 11:09:30 crc kubenswrapper[4769]: I1125 11:09:30.692447 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af5c9899-c379-4d60-9aae-979a20de2ad2-utilities\") pod \"redhat-operators-h57kt\" (UID: \"af5c9899-c379-4d60-9aae-979a20de2ad2\") " pod="openshift-marketplace/redhat-operators-h57kt" Nov 25 11:09:30 crc kubenswrapper[4769]: I1125 11:09:30.692531 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z4x9j\" (UniqueName: \"kubernetes.io/projected/af5c9899-c379-4d60-9aae-979a20de2ad2-kube-api-access-z4x9j\") pod \"redhat-operators-h57kt\" (UID: 
\"af5c9899-c379-4d60-9aae-979a20de2ad2\") " pod="openshift-marketplace/redhat-operators-h57kt" Nov 25 11:09:30 crc kubenswrapper[4769]: I1125 11:09:30.692564 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af5c9899-c379-4d60-9aae-979a20de2ad2-catalog-content\") pod \"redhat-operators-h57kt\" (UID: \"af5c9899-c379-4d60-9aae-979a20de2ad2\") " pod="openshift-marketplace/redhat-operators-h57kt" Nov 25 11:09:30 crc kubenswrapper[4769]: I1125 11:09:30.693499 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af5c9899-c379-4d60-9aae-979a20de2ad2-catalog-content\") pod \"redhat-operators-h57kt\" (UID: \"af5c9899-c379-4d60-9aae-979a20de2ad2\") " pod="openshift-marketplace/redhat-operators-h57kt" Nov 25 11:09:30 crc kubenswrapper[4769]: I1125 11:09:30.693541 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af5c9899-c379-4d60-9aae-979a20de2ad2-utilities\") pod \"redhat-operators-h57kt\" (UID: \"af5c9899-c379-4d60-9aae-979a20de2ad2\") " pod="openshift-marketplace/redhat-operators-h57kt" Nov 25 11:09:31 crc kubenswrapper[4769]: I1125 11:09:31.387065 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z4x9j\" (UniqueName: \"kubernetes.io/projected/af5c9899-c379-4d60-9aae-979a20de2ad2-kube-api-access-z4x9j\") pod \"redhat-operators-h57kt\" (UID: \"af5c9899-c379-4d60-9aae-979a20de2ad2\") " pod="openshift-marketplace/redhat-operators-h57kt" Nov 25 11:09:31 crc kubenswrapper[4769]: I1125 11:09:31.623095 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-ccknj"] Nov 25 11:09:31 crc kubenswrapper[4769]: I1125 11:09:31.682808 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-h57kt" Nov 25 11:09:31 crc kubenswrapper[4769]: I1125 11:09:31.696102 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ccknj"] Nov 25 11:09:31 crc kubenswrapper[4769]: I1125 11:09:31.696215 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-ccknj" Nov 25 11:09:31 crc kubenswrapper[4769]: I1125 11:09:31.821337 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mfkjl\" (UniqueName: \"kubernetes.io/projected/36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24-kube-api-access-mfkjl\") pod \"redhat-operators-ccknj\" (UID: \"36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24\") " pod="openshift-marketplace/redhat-operators-ccknj" Nov 25 11:09:31 crc kubenswrapper[4769]: I1125 11:09:31.821420 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24-catalog-content\") pod \"redhat-operators-ccknj\" (UID: \"36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24\") " pod="openshift-marketplace/redhat-operators-ccknj" Nov 25 11:09:31 crc kubenswrapper[4769]: I1125 11:09:31.821479 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24-utilities\") pod \"redhat-operators-ccknj\" (UID: \"36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24\") " pod="openshift-marketplace/redhat-operators-ccknj" Nov 25 11:09:31 crc kubenswrapper[4769]: I1125 11:09:31.927796 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mfkjl\" (UniqueName: \"kubernetes.io/projected/36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24-kube-api-access-mfkjl\") pod \"redhat-operators-ccknj\" (UID: \"36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24\") " pod="openshift-marketplace/redhat-operators-ccknj" Nov 25 11:09:31 crc kubenswrapper[4769]: I1125 11:09:31.928201 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24-catalog-content\") pod \"redhat-operators-ccknj\" (UID: \"36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24\") " pod="openshift-marketplace/redhat-operators-ccknj" Nov 25 11:09:31 crc kubenswrapper[4769]: I1125 11:09:31.928264 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24-utilities\") pod \"redhat-operators-ccknj\" (UID: \"36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24\") " pod="openshift-marketplace/redhat-operators-ccknj" Nov 25 11:09:31 crc kubenswrapper[4769]: I1125 11:09:31.928838 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24-utilities\") pod \"redhat-operators-ccknj\" (UID: \"36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24\") " pod="openshift-marketplace/redhat-operators-ccknj" Nov 25 11:09:31 crc kubenswrapper[4769]: I1125 11:09:31.929297 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24-catalog-content\") pod \"redhat-operators-ccknj\" (UID: \"36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24\") " pod="openshift-marketplace/redhat-operators-ccknj" Nov 25 11:09:31 crc kubenswrapper[4769]: I1125 11:09:31.968259 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mfkjl\" (UniqueName: \"kubernetes.io/projected/36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24-kube-api-access-mfkjl\") pod \"redhat-operators-ccknj\" (UID: 
\"36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24\") " pod="openshift-marketplace/redhat-operators-ccknj" Nov 25 11:09:32 crc kubenswrapper[4769]: I1125 11:09:32.018537 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ccknj" Nov 25 11:09:32 crc kubenswrapper[4769]: I1125 11:09:32.817865 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-pq2q8"] Nov 25 11:09:32 crc kubenswrapper[4769]: I1125 11:09:32.821096 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pq2q8" Nov 25 11:09:32 crc kubenswrapper[4769]: I1125 11:09:32.850059 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/71135c8c-e27a-4ae0-9e15-afe6164def89-utilities\") pod \"redhat-operators-pq2q8\" (UID: \"71135c8c-e27a-4ae0-9e15-afe6164def89\") " pod="openshift-marketplace/redhat-operators-pq2q8" Nov 25 11:09:32 crc kubenswrapper[4769]: I1125 11:09:32.850300 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/71135c8c-e27a-4ae0-9e15-afe6164def89-catalog-content\") pod \"redhat-operators-pq2q8\" (UID: \"71135c8c-e27a-4ae0-9e15-afe6164def89\") " pod="openshift-marketplace/redhat-operators-pq2q8" Nov 25 11:09:32 crc kubenswrapper[4769]: I1125 11:09:32.850465 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nvbtt\" (UniqueName: \"kubernetes.io/projected/71135c8c-e27a-4ae0-9e15-afe6164def89-kube-api-access-nvbtt\") pod \"redhat-operators-pq2q8\" (UID: \"71135c8c-e27a-4ae0-9e15-afe6164def89\") " pod="openshift-marketplace/redhat-operators-pq2q8" Nov 25 11:09:32 crc kubenswrapper[4769]: I1125 11:09:32.951657 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/71135c8c-e27a-4ae0-9e15-afe6164def89-catalog-content\") pod \"redhat-operators-pq2q8\" (UID: \"71135c8c-e27a-4ae0-9e15-afe6164def89\") " pod="openshift-marketplace/redhat-operators-pq2q8" Nov 25 11:09:32 crc kubenswrapper[4769]: I1125 11:09:32.951747 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nvbtt\" (UniqueName: \"kubernetes.io/projected/71135c8c-e27a-4ae0-9e15-afe6164def89-kube-api-access-nvbtt\") pod \"redhat-operators-pq2q8\" (UID: \"71135c8c-e27a-4ae0-9e15-afe6164def89\") " pod="openshift-marketplace/redhat-operators-pq2q8" Nov 25 11:09:32 crc kubenswrapper[4769]: I1125 11:09:32.951808 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/71135c8c-e27a-4ae0-9e15-afe6164def89-utilities\") pod \"redhat-operators-pq2q8\" (UID: \"71135c8c-e27a-4ae0-9e15-afe6164def89\") " pod="openshift-marketplace/redhat-operators-pq2q8" Nov 25 11:09:32 crc kubenswrapper[4769]: I1125 11:09:32.952512 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/71135c8c-e27a-4ae0-9e15-afe6164def89-utilities\") pod \"redhat-operators-pq2q8\" (UID: \"71135c8c-e27a-4ae0-9e15-afe6164def89\") " pod="openshift-marketplace/redhat-operators-pq2q8" Nov 25 11:09:32 crc kubenswrapper[4769]: I1125 11:09:32.952773 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/71135c8c-e27a-4ae0-9e15-afe6164def89-catalog-content\") pod \"redhat-operators-pq2q8\" (UID: \"71135c8c-e27a-4ae0-9e15-afe6164def89\") " pod="openshift-marketplace/redhat-operators-pq2q8" Nov 25 11:09:32 crc kubenswrapper[4769]: I1125 11:09:32.974796 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nvbtt\" (UniqueName: \"kubernetes.io/projected/71135c8c-e27a-4ae0-9e15-afe6164def89-kube-api-access-nvbtt\") pod \"redhat-operators-pq2q8\" (UID: \"71135c8c-e27a-4ae0-9e15-afe6164def89\") " pod="openshift-marketplace/redhat-operators-pq2q8" Nov 25 11:09:33 crc kubenswrapper[4769]: I1125 11:09:33.049550 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pq2q8"] Nov 25 11:09:33 crc kubenswrapper[4769]: I1125 11:09:33.155771 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pq2q8" Nov 25 11:09:33 crc kubenswrapper[4769]: I1125 11:09:33.795141 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ovn-northd-0" podUID="5a0cfe48-e8db-4d53-b3a8-68c5e724538a" containerName="ovn-northd" probeResult="failure" output="command timed out" Nov 25 11:09:33 crc kubenswrapper[4769]: I1125 11:09:33.818302 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-northd-0" podUID="5a0cfe48-e8db-4d53-b3a8-68c5e724538a" containerName="ovn-northd" probeResult="failure" output="command timed out" Nov 25 11:09:35 crc kubenswrapper[4769]: I1125 11:09:35.501743 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-7xcjk" Nov 25 11:09:35 crc kubenswrapper[4769]: I1125 11:09:35.594589 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-zv5xr" Nov 25 11:09:36 crc kubenswrapper[4769]: I1125 11:09:36.047748 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ccknj"] Nov 25 11:09:36 crc kubenswrapper[4769]: I1125 11:09:36.065573 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-kbhzd" Nov 25 11:09:36 crc kubenswrapper[4769]: I1125 11:09:36.068875 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-h57kt"] Nov 25 11:09:36 crc kubenswrapper[4769]: I1125 11:09:36.093761 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gwvnd"] Nov 25 11:09:36 crc kubenswrapper[4769]: I1125 11:09:36.109117 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pq2q8"] Nov 25 11:09:36 crc kubenswrapper[4769]: I1125 11:09:36.229249 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-wssdn" Nov 25 11:09:36 crc kubenswrapper[4769]: I1125 11:09:36.241861 4769 scope.go:117] "RemoveContainer" containerID="4b939fc00d006c5c582da7cfcaed5ec1363b4453901fa7b7866cf6b4ff5b14f5" Nov 25 11:09:36 crc kubenswrapper[4769]: E1125 11:09:36.242149 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256"
Nov 25 11:09:36 crc kubenswrapper[4769]: I1125 11:09:36.467938 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h57kt" event={"ID":"af5c9899-c379-4d60-9aae-979a20de2ad2","Type":"ContainerStarted","Data":"9c104021f2827aae679fc6d341da6863bcbffb01d8d3dac32eb26d335ca836a9"}
Nov 25 11:09:36 crc kubenswrapper[4769]: I1125 11:09:36.468938 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ccknj" event={"ID":"36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24","Type":"ContainerStarted","Data":"a31d9bf85cff2449d94c5ae5b16529c7774f8327c9476b36e351b416801bd6d6"}
Nov 25 11:09:36 crc kubenswrapper[4769]: I1125 11:09:36.470415 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pq2q8" event={"ID":"71135c8c-e27a-4ae0-9e15-afe6164def89","Type":"ContainerStarted","Data":"2eb471835bc11ce22740841fef04b65e26b300b7d80d24a7f43ab1506cd8014f"}
Nov 25 11:09:36 crc kubenswrapper[4769]: I1125 11:09:36.471837 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gwvnd" event={"ID":"e588820f-3563-4b98-9ba4-85da1cee2821","Type":"ContainerStarted","Data":"0f3924682089918cf0b19123661c56e8e70a89eff21cf81eafbee6600d73ad54"}
Nov 25 11:09:38 crc kubenswrapper[4769]: I1125 11:09:38.362561 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-64569bb78d-pzqdq"
Nov 25 11:09:40 crc kubenswrapper[4769]: I1125 11:09:40.515204 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-zwpnn" podUID="aa14b8e2-159e-4850-8816-14bc635838ac" containerName="frr" probeResult="failure" output="Get \"http://127.0.0.1:7573/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 25 11:09:43 crc kubenswrapper[4769]: I1125 11:09:43.601494 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gwvnd" event={"ID":"e588820f-3563-4b98-9ba4-85da1cee2821","Type":"ContainerStarted","Data":"502ff72f42825fcb060ef3a6d40b8f79b30387790f9970563a7867bcba02e4ad"}
Nov 25 11:09:45 crc kubenswrapper[4769]: I1125 11:09:45.738438 4769 generic.go:334] "Generic (PLEG): container finished" podID="e588820f-3563-4b98-9ba4-85da1cee2821" containerID="502ff72f42825fcb060ef3a6d40b8f79b30387790f9970563a7867bcba02e4ad" exitCode=0
Nov 25 11:09:45 crc kubenswrapper[4769]: I1125 11:09:45.738899 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gwvnd" event={"ID":"e588820f-3563-4b98-9ba4-85da1cee2821","Type":"ContainerDied","Data":"502ff72f42825fcb060ef3a6d40b8f79b30387790f9970563a7867bcba02e4ad"}
Nov 25 11:09:45 crc kubenswrapper[4769]: I1125 11:09:45.745104 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h57kt" event={"ID":"af5c9899-c379-4d60-9aae-979a20de2ad2","Type":"ContainerStarted","Data":"2a4a84b663d988f8a339e2eb208887afadc2463fdc16121db9034fac9c3b993b"}
Nov 25 11:09:45 crc kubenswrapper[4769]: I1125 11:09:45.750518 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9hv66" event={"ID":"7e334767-99a9-4a84-bcd7-e3e53d56750a","Type":"ContainerStarted","Data":"644c2c018e722cfbb1475b24513cb7a92abcf292944dbd52c593c76c72401d3d"}
Nov 25 11:09:45 crc kubenswrapper[4769]: I1125 11:09:45.759738 4769 generic.go:334] "Generic (PLEG): container finished" podID="36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24" containerID="86e376ec896971639945563443ade776c1d1c20d08a2f850cef284bc5e08fa3b" exitCode=0
Nov 25 11:09:45 crc kubenswrapper[4769]: I1125 11:09:45.759778 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ccknj" event={"ID":"36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24","Type":"ContainerDied","Data":"86e376ec896971639945563443ade776c1d1c20d08a2f850cef284bc5e08fa3b"}
Nov 25 11:09:46 crc kubenswrapper[4769]: I1125 11:09:46.774629 4769 generic.go:334] "Generic (PLEG): container finished" podID="af5c9899-c379-4d60-9aae-979a20de2ad2" containerID="2a4a84b663d988f8a339e2eb208887afadc2463fdc16121db9034fac9c3b993b" exitCode=0
Nov 25 11:09:46 crc kubenswrapper[4769]: I1125 11:09:46.774732 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h57kt" event={"ID":"af5c9899-c379-4d60-9aae-979a20de2ad2","Type":"ContainerDied","Data":"2a4a84b663d988f8a339e2eb208887afadc2463fdc16121db9034fac9c3b993b"}
Nov 25 11:09:46 crc kubenswrapper[4769]: I1125 11:09:46.777388 4769 generic.go:334] "Generic (PLEG): container finished" podID="71135c8c-e27a-4ae0-9e15-afe6164def89" containerID="7984ec0fe84d0c0fa77453a85eb5526943846d43cc07424faa5e2780e477fba1" exitCode=0
Nov 25 11:09:46 crc kubenswrapper[4769]: I1125 11:09:46.777458 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pq2q8" event={"ID":"71135c8c-e27a-4ae0-9e15-afe6164def89","Type":"ContainerDied","Data":"7984ec0fe84d0c0fa77453a85eb5526943846d43cc07424faa5e2780e477fba1"}
Nov 25 11:09:48 crc kubenswrapper[4769]: I1125 11:09:48.367777 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="52438bf8-8800-4078-bc88-63033a83dd2e" containerName="galera" probeResult="failure" output="command timed out"
Nov 25 11:09:48 crc kubenswrapper[4769]: I1125 11:09:48.368536 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-galera-0" podUID="52438bf8-8800-4078-bc88-63033a83dd2e" containerName="galera" probeResult="failure" output="command timed out"
Nov 25 11:09:49 crc kubenswrapper[4769]: I1125 11:09:49.237601 4769 scope.go:117] "RemoveContainer" containerID="4b939fc00d006c5c582da7cfcaed5ec1363b4453901fa7b7866cf6b4ff5b14f5"
Nov 25 11:09:49 crc kubenswrapper[4769]: E1125 11:09:49.242001 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256"
Nov 25 11:09:51 crc kubenswrapper[4769]: I1125 11:09:51.790232 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="eb894707-cfa1-4716-a991-31992d8cff88" containerName="ceilometer-notification-agent" probeResult="failure" output="command timed out"
Nov 25 11:09:57 crc kubenswrapper[4769]: I1125 11:09:57.786902 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-galera-0" podUID="52438bf8-8800-4078-bc88-63033a83dd2e" containerName="galera" probeResult="failure" output="command timed out"
Nov 25 11:09:57 crc kubenswrapper[4769]: I1125 11:09:57.786997 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="52438bf8-8800-4078-bc88-63033a83dd2e" containerName="galera" probeResult="failure" output="command timed out"
Nov 25 11:09:59 crc kubenswrapper[4769]: I1125 11:09:59.579144 4769 patch_prober.go:28] interesting pod/oauth-openshift-64f9fb64bf-6x7dd container/oauth-openshift namespace/openshift-authentication: Liveness probe status=failure output="Get \"https://10.217.0.54:6443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 25 11:09:59 crc kubenswrapper[4769]: I1125 11:09:59.579193 4769 patch_prober.go:28] interesting pod/oauth-openshift-64f9fb64bf-6x7dd container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.54:6443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 25 11:09:59 crc kubenswrapper[4769]: I1125 11:09:59.584455 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" podUID="32d90a24-d05b-438f-a46f-4d3663ccb171" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.54:6443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 25 11:09:59 crc kubenswrapper[4769]: I1125 11:09:59.584525 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-64f9fb64bf-6x7dd" podUID="32d90a24-d05b-438f-a46f-4d3663ccb171" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.54:6443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 25 11:09:59 crc kubenswrapper[4769]: I1125 11:09:59.599204 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/openstack-operator-index-qln84" podUID="eeb84a4b-8771-40e8-842b-9a67b1044074" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:09:59 crc kubenswrapper[4769]: timeout: health rpc did not complete within 1s
Nov 25 11:09:59 crc kubenswrapper[4769]: >
Nov 25 11:09:59 crc kubenswrapper[4769]: I1125 11:09:59.605108 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/openstack-operator-index-qln84" podUID="eeb84a4b-8771-40e8-842b-9a67b1044074" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:09:59 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:09:59 crc kubenswrapper[4769]: >
Nov 25 11:09:59 crc kubenswrapper[4769]: I1125 11:09:59.786126 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-cell1-galera-0" podUID="8e360825-f56f-4e69-9e17-c9e78f295267" containerName="galera" probeResult="failure" output="command timed out"
Nov 25 11:09:59 crc kubenswrapper[4769]: I1125 11:09:59.786170 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="8e360825-f56f-4e69-9e17-c9e78f295267" containerName="galera" probeResult="failure" output="command timed out"
Nov 25 11:10:01 crc kubenswrapper[4769]: I1125 11:10:01.237663 4769 scope.go:117] "RemoveContainer" containerID="4b939fc00d006c5c582da7cfcaed5ec1363b4453901fa7b7866cf6b4ff5b14f5"
Nov 25 11:10:01 crc kubenswrapper[4769]: E1125 11:10:01.238197 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256"
Nov 25 11:10:01 crc kubenswrapper[4769]: I1125 11:10:01.575194 4769 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-wn2f7 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.55:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 25 11:10:01 crc kubenswrapper[4769]: I1125 11:10:01.575269 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-wn2f7" podUID="2ef0d2ad-687b-4157-8ab5-803122670e19" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.55:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 25 11:10:01 crc kubenswrapper[4769]: I1125 11:10:01.575460 4769 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-wn2f7 container/marketplace-operator namespace/openshift-marketplace: Liveness probe status=failure output="Get \"http://10.217.0.55:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 25 11:10:01 crc kubenswrapper[4769]: I1125 11:10:01.575530 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/marketplace-operator-79b997595-wn2f7" podUID="2ef0d2ad-687b-4157-8ab5-803122670e19" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.55:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 25 11:10:01 crc kubenswrapper[4769]: I1125 11:10:01.793447 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="eb894707-cfa1-4716-a991-31992d8cff88" containerName="ceilometer-central-agent" probeResult="failure" output="command timed out"
Nov 25 11:10:02 crc kubenswrapper[4769]: I1125 11:10:02.868327 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-c6g96"]
Nov 25 11:10:03 crc kubenswrapper[4769]: I1125 11:10:03.070356 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-c6g96"
Nov 25 11:10:03 crc kubenswrapper[4769]: I1125 11:10:03.213454 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bjnpc\" (UniqueName: \"kubernetes.io/projected/b831a4b0-2508-4624-b8cb-833a8d5b10ad-kube-api-access-bjnpc\") pod \"redhat-operators-c6g96\" (UID: \"b831a4b0-2508-4624-b8cb-833a8d5b10ad\") " pod="openshift-marketplace/redhat-operators-c6g96"
Nov 25 11:10:03 crc kubenswrapper[4769]: I1125 11:10:03.213678 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b831a4b0-2508-4624-b8cb-833a8d5b10ad-catalog-content\") pod \"redhat-operators-c6g96\" (UID: \"b831a4b0-2508-4624-b8cb-833a8d5b10ad\") " pod="openshift-marketplace/redhat-operators-c6g96"
Nov 25 11:10:03 crc kubenswrapper[4769]: I1125 11:10:03.214057 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b831a4b0-2508-4624-b8cb-833a8d5b10ad-utilities\") pod \"redhat-operators-c6g96\" (UID: \"b831a4b0-2508-4624-b8cb-833a8d5b10ad\") " pod="openshift-marketplace/redhat-operators-c6g96"
Nov 25 11:10:03 crc kubenswrapper[4769]: I1125 11:10:03.316141 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bjnpc\" (UniqueName: \"kubernetes.io/projected/b831a4b0-2508-4624-b8cb-833a8d5b10ad-kube-api-access-bjnpc\") pod \"redhat-operators-c6g96\" (UID: \"b831a4b0-2508-4624-b8cb-833a8d5b10ad\") " pod="openshift-marketplace/redhat-operators-c6g96"
Nov 25 11:10:03 crc kubenswrapper[4769]: I1125 11:10:03.316203 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b831a4b0-2508-4624-b8cb-833a8d5b10ad-catalog-content\") pod \"redhat-operators-c6g96\" (UID: \"b831a4b0-2508-4624-b8cb-833a8d5b10ad\") " pod="openshift-marketplace/redhat-operators-c6g96"
Nov 25 11:10:03 crc kubenswrapper[4769]: I1125 11:10:03.316278 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b831a4b0-2508-4624-b8cb-833a8d5b10ad-utilities\") pod \"redhat-operators-c6g96\" (UID: \"b831a4b0-2508-4624-b8cb-833a8d5b10ad\") " pod="openshift-marketplace/redhat-operators-c6g96"
Nov 25 11:10:03 crc kubenswrapper[4769]: I1125 11:10:03.316844 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b831a4b0-2508-4624-b8cb-833a8d5b10ad-utilities\") pod \"redhat-operators-c6g96\" (UID: \"b831a4b0-2508-4624-b8cb-833a8d5b10ad\") " pod="openshift-marketplace/redhat-operators-c6g96"
Nov 25 11:10:03 crc kubenswrapper[4769]: I1125 11:10:03.378318 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b831a4b0-2508-4624-b8cb-833a8d5b10ad-catalog-content\") pod \"redhat-operators-c6g96\" (UID: \"b831a4b0-2508-4624-b8cb-833a8d5b10ad\") " pod="openshift-marketplace/redhat-operators-c6g96"
Nov 25 11:10:03 crc kubenswrapper[4769]: I1125 11:10:03.655863 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pq2q8" event={"ID":"71135c8c-e27a-4ae0-9e15-afe6164def89","Type":"ContainerStarted","Data":"bf85cbef29e4aeee6b319ac8803fce5bef0973e197737b718e1b27b03a082476"}
Nov 25 11:10:03 crc kubenswrapper[4769]: I1125 11:10:03.726143 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gwvnd" event={"ID":"e588820f-3563-4b98-9ba4-85da1cee2821","Type":"ContainerStarted","Data":"3e8eada057b1d52cc40fd819d5f756a11eaf48ff6396c948a0bcd5bf300e3898"}
Nov 25 11:10:03 crc kubenswrapper[4769]: I1125 11:10:03.878684 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h57kt" event={"ID":"af5c9899-c379-4d60-9aae-979a20de2ad2","Type":"ContainerStarted","Data":"633cd28ec7fb44b95489c8af0cfbcffaf11ffc0e9c1961b65114bcbe0841dba6"}
Nov 25 11:10:03 crc kubenswrapper[4769]: I1125 11:10:03.917844 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ccknj" event={"ID":"36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24","Type":"ContainerStarted","Data":"151df6f7f41c3828ab34928d0805938c6a65452bacc249ceaa2784140a1136a7"}
Nov 25 11:10:03 crc kubenswrapper[4769]: I1125 11:10:03.985166 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bjnpc\" (UniqueName: \"kubernetes.io/projected/b831a4b0-2508-4624-b8cb-833a8d5b10ad-kube-api-access-bjnpc\") pod \"redhat-operators-c6g96\" (UID: \"b831a4b0-2508-4624-b8cb-833a8d5b10ad\") " pod="openshift-marketplace/redhat-operators-c6g96"
Nov 25 11:10:04 crc kubenswrapper[4769]: I1125 11:10:04.129180 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-c6g96"
Nov 25 11:10:04 crc kubenswrapper[4769]: I1125 11:10:04.362470 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-c6g96"]
Nov 25 11:10:07 crc kubenswrapper[4769]: I1125 11:10:07.313354 4769 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 3.974081052s: [/var/lib/containers/storage/overlay/80ea85e26438170f30ca958408540eae6fba49e0a382ea0f011beada3de2bb54/diff /var/log/pods/openstack_openstackclient_be068e15-9a8b-472c-9a66-8ee06cf2491f/openstackclient/0.log]; will not log again for this container unless duration exceeds 2s
Nov 25 11:10:07 crc kubenswrapper[4769]: I1125 11:10:07.786595 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-galera-0" podUID="52438bf8-8800-4078-bc88-63033a83dd2e" containerName="galera" probeResult="failure" output="command timed out"
Nov 25 11:10:07 crc kubenswrapper[4769]: I1125 11:10:07.786859 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/openstack-galera-0"
Nov 25 11:10:07 crc kubenswrapper[4769]: I1125 11:10:07.786619 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="52438bf8-8800-4078-bc88-63033a83dd2e" containerName="galera" probeResult="failure" output="command timed out"
Nov 25 11:10:07 crc kubenswrapper[4769]: I1125 11:10:07.788662 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0"
Nov 25 11:10:08 crc kubenswrapper[4769]: I1125 11:10:07.883128 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="galera" containerStatusID={"Type":"cri-o","ID":"7699fb5bf48b56d797190b3abce5b038db7167de6ff0dcfdf201bfe7e3850e8e"} pod="openstack/openstack-galera-0" containerMessage="Container galera failed liveness probe, will be restarted"
Nov 25 11:10:09 crc kubenswrapper[4769]: I1125 11:10:09.786118 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="52438bf8-8800-4078-bc88-63033a83dd2e" containerName="galera" probeResult="failure" output="command timed out"
Nov 25 11:10:10 crc kubenswrapper[4769]: I1125 11:10:10.449130 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-zwpnn" podUID="aa14b8e2-159e-4850-8816-14bc635838ac" containerName="frr" probeResult="failure" output="Get \"http://127.0.0.1:7573/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 25 11:10:10 crc kubenswrapper[4769]: I1125 11:10:10.879287 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-galera-0" podUID="52438bf8-8800-4078-bc88-63033a83dd2e" containerName="galera" containerID="cri-o://7699fb5bf48b56d797190b3abce5b038db7167de6ff0dcfdf201bfe7e3850e8e" gracePeriod=28
Nov 25 11:10:11 crc kubenswrapper[4769]: I1125 11:10:11.005054 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h2wlq" event={"ID":"993bc795-fde2-4876-9e76-e97a6095576d","Type":"ContainerDied","Data":"667cf0a9aad845523b83761ec7e6b90f49da63f32b87d16da5405b0dc2032a15"}
Nov 25 11:10:11 crc kubenswrapper[4769]: I1125 11:10:11.005745 4769 generic.go:334] "Generic (PLEG): container finished" podID="993bc795-fde2-4876-9e76-e97a6095576d" containerID="667cf0a9aad845523b83761ec7e6b90f49da63f32b87d16da5405b0dc2032a15" exitCode=0
Nov 25 11:10:11 crc kubenswrapper[4769]: I1125 11:10:11.009143 4769 generic.go:334] "Generic (PLEG): container finished" podID="4a6225f9-cdc5-486a-b813-db81a752fade" containerID="752cb86da8fecd9cd08ae2eb2fb21f293e404c471b4243d3fc085b86ae4ad013" exitCode=0
Nov 25 11:10:11 crc kubenswrapper[4769]: I1125 11:10:11.009220 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-856hd" event={"ID":"4a6225f9-cdc5-486a-b813-db81a752fade","Type":"ContainerDied","Data":"752cb86da8fecd9cd08ae2eb2fb21f293e404c471b4243d3fc085b86ae4ad013"}
Nov 25 11:10:11 crc kubenswrapper[4769]: I1125 11:10:11.011872 4769 generic.go:334] "Generic (PLEG): container finished" podID="f1b1fb95-30bd-457e-b59c-c06e6b770f42" containerID="2846aa87a8fb70c18047658bbab361766ef69a9d93f19ad2dc263e2db41e2321" exitCode=0
Nov 25 11:10:11 crc kubenswrapper[4769]: I1125 11:10:11.011925 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gkx9w" event={"ID":"f1b1fb95-30bd-457e-b59c-c06e6b770f42","Type":"ContainerDied","Data":"2846aa87a8fb70c18047658bbab361766ef69a9d93f19ad2dc263e2db41e2321"}
Nov 25 11:10:11 crc kubenswrapper[4769]: I1125 11:10:11.017936 4769 generic.go:334] "Generic (PLEG): container finished" podID="e9d04b78-f610-43ce-bc66-ca181acf6654" containerID="543eaf3d2bfcd44951d40cc61618b80d30d796e12a6f97d497bcc3f393499c9d" exitCode=0
Nov 25 11:10:11 crc kubenswrapper[4769]: I1125 11:10:11.018003 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vr22m" event={"ID":"e9d04b78-f610-43ce-bc66-ca181acf6654","Type":"ContainerDied","Data":"543eaf3d2bfcd44951d40cc61618b80d30d796e12a6f97d497bcc3f393499c9d"}
Nov 25 11:10:11 crc kubenswrapper[4769]: I1125 11:10:11.029827 4769 generic.go:334] "Generic (PLEG): container finished" podID="bdd2d26d-cc13-41dd-9e57-fd8e81c627fe" containerID="d632444cd9b6b05bdddaf9e5883e0e245e2d64c8856e489fca8a481aa146d254" exitCode=0
Nov 25 11:10:11 crc kubenswrapper[4769]: I1125 11:10:11.029885 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9v54j" event={"ID":"bdd2d26d-cc13-41dd-9e57-fd8e81c627fe","Type":"ContainerDied","Data":"d632444cd9b6b05bdddaf9e5883e0e245e2d64c8856e489fca8a481aa146d254"}
Nov 25 11:10:11 crc kubenswrapper[4769]: I1125 11:10:11.037520 4769 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 25 11:10:11 crc kubenswrapper[4769]: I1125 11:10:11.759738 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-59lwg"]
Nov 25 11:10:11 crc kubenswrapper[4769]: I1125 11:10:11.763522 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-59lwg" podUID="65d1fe44-2bb2-4d58-8bcf-7e15ab70164c" containerName="registry-server" containerID="cri-o://76a315f23d8193ceea354b8a1239cea5998d44c943d0ca8e2897621caea918a9" gracePeriod=2
Nov 25 11:10:11 crc kubenswrapper[4769]: I1125 11:10:11.857016 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-scheduler-0" podUID="18a4a142-bb1b-4e44-9110-6a6e15b86b0d" containerName="cinder-scheduler" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 11:10:12 crc kubenswrapper[4769]: E1125 11:10:12.496990 4769 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 76a315f23d8193ceea354b8a1239cea5998d44c943d0ca8e2897621caea918a9 is running failed: container process not found" containerID="76a315f23d8193ceea354b8a1239cea5998d44c943d0ca8e2897621caea918a9" cmd=["grpc_health_probe","-addr=:50051"]
Nov 25 11:10:12 crc kubenswrapper[4769]: E1125 11:10:12.499603 4769 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 76a315f23d8193ceea354b8a1239cea5998d44c943d0ca8e2897621caea918a9 is running failed: container process not found" containerID="76a315f23d8193ceea354b8a1239cea5998d44c943d0ca8e2897621caea918a9" cmd=["grpc_health_probe","-addr=:50051"]
Nov 25 11:10:12 crc kubenswrapper[4769]: E1125 11:10:12.502187 4769 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 76a315f23d8193ceea354b8a1239cea5998d44c943d0ca8e2897621caea918a9 is running failed: container process not found" containerID="76a315f23d8193ceea354b8a1239cea5998d44c943d0ca8e2897621caea918a9" cmd=["grpc_health_probe","-addr=:50051"]
Nov 25 11:10:12 crc kubenswrapper[4769]: E1125 11:10:12.502245 4769 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 76a315f23d8193ceea354b8a1239cea5998d44c943d0ca8e2897621caea918a9 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/community-operators-59lwg" podUID="65d1fe44-2bb2-4d58-8bcf-7e15ab70164c" containerName="registry-server"
Nov 25 11:10:13 crc kubenswrapper[4769]: I1125 11:10:13.119216 4769 generic.go:334] "Generic (PLEG): container finished" podID="65d1fe44-2bb2-4d58-8bcf-7e15ab70164c" containerID="76a315f23d8193ceea354b8a1239cea5998d44c943d0ca8e2897621caea918a9" exitCode=0
Nov 25 11:10:13 crc kubenswrapper[4769]: I1125 11:10:13.119738 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-59lwg" event={"ID":"65d1fe44-2bb2-4d58-8bcf-7e15ab70164c","Type":"ContainerDied","Data":"76a315f23d8193ceea354b8a1239cea5998d44c943d0ca8e2897621caea918a9"}
Nov 25 11:10:13 crc kubenswrapper[4769]: I1125 11:10:13.128773 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gkx9w" event={"ID":"f1b1fb95-30bd-457e-b59c-c06e6b770f42","Type":"ContainerStarted","Data":"9efaa7da0c863469625d1dad8af55684166514f7474256c07dcaa91b94506e08"}
Nov 25 11:10:13 crc kubenswrapper[4769]: I1125 11:10:13.150350 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vr22m" event={"ID":"e9d04b78-f610-43ce-bc66-ca181acf6654","Type":"ContainerStarted","Data":"7ee8f571ffb31393afe705799f437bbd6de94b66c4b25b6c4b3deeb4ceeddd41"}
Nov 25 11:10:13 crc kubenswrapper[4769]: I1125 11:10:13.169535 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9v54j" event={"ID":"bdd2d26d-cc13-41dd-9e57-fd8e81c627fe","Type":"ContainerStarted","Data":"0f188e2c4c0b837c01d6ec185b9309ce415641bdf1e752b67ec2071fe9681798"}
Nov 25 11:10:13 crc kubenswrapper[4769]: I1125 11:10:13.499880 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-gkx9w" podStartSLOduration=3.664352263 podStartE2EDuration="54.495175723s" podCreationTimestamp="2025-11-25 11:09:19 +0000 UTC" firstStartedPulling="2025-11-25 11:09:21.103705849 +0000 UTC m=+5109.688678162" lastFinishedPulling="2025-11-25 11:10:11.934529309 +0000 UTC m=+5160.519501622" observedRunningTime="2025-11-25 11:10:13.438547643 +0000 UTC m=+5162.023519956" watchObservedRunningTime="2025-11-25 11:10:13.495175723 +0000 UTC m=+5162.080148036"
Nov 25 11:10:13 crc kubenswrapper[4769]: I1125 11:10:13.519218 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-vr22m" podStartSLOduration=4.866813912 podStartE2EDuration="52.51919614s" podCreationTimestamp="2025-11-25 11:09:21 +0000 UTC" firstStartedPulling="2025-11-25 11:09:24.256654376 +0000 UTC m=+5112.841626689" lastFinishedPulling="2025-11-25 11:10:11.909036604 +0000 UTC m=+5160.494008917" observedRunningTime="2025-11-25 11:10:13.494144936 +0000 UTC m=+5162.079117259" watchObservedRunningTime="2025-11-25 11:10:13.51919614 +0000 UTC m=+5162.104168453"
Nov 25 11:10:14 crc kubenswrapper[4769]: I1125 11:10:14.268162 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h2wlq" event={"ID":"993bc795-fde2-4876-9e76-e97a6095576d","Type":"ContainerStarted","Data":"29fdfb935697aee46562dcc97a060e9c451fb96efb86b1264bca18990f56b411"}
Nov 25 11:10:14 crc kubenswrapper[4769]: I1125 11:10:14.268454 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-856hd" event={"ID":"4a6225f9-cdc5-486a-b813-db81a752fade","Type":"ContainerStarted","Data":"c11092b5f3f5b634a49406a81945567aba55f7e8eba28dfd0eb51eb84b1e6c5b"}
Nov 25 11:10:14 crc kubenswrapper[4769]: I1125 11:10:14.315288 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-9v54j" podStartSLOduration=5.408725862 podStartE2EDuration="56.315264519s" podCreationTimestamp="2025-11-25 11:09:18 +0000 UTC" firstStartedPulling="2025-11-25 11:09:21.107111359 +0000 UTC m=+5109.692083672" lastFinishedPulling="2025-11-25 11:10:12.013650016 +0000 UTC m=+5160.598622329" observedRunningTime="2025-11-25 11:10:14.29151653 +0000 UTC m=+5162.876488843" watchObservedRunningTime="2025-11-25 11:10:14.315264519 +0000 UTC m=+5162.900236832"
Nov 25 11:10:14 crc kubenswrapper[4769]: I1125 11:10:14.337921 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-h2wlq" podStartSLOduration=4.040993758 podStartE2EDuration="51.337904749s" podCreationTimestamp="2025-11-25 11:09:23 +0000 UTC" firstStartedPulling="2025-11-25 11:09:25.312463157 +0000 UTC m=+5113.897435470" lastFinishedPulling="2025-11-25 11:10:12.609374148 +0000 UTC m=+5161.194346461" observedRunningTime="2025-11-25 11:10:14.337306603 +0000 UTC m=+5162.922278926" watchObservedRunningTime="2025-11-25 11:10:14.337904749 +0000 UTC m=+5162.922877062"
Nov 25 11:10:14 crc kubenswrapper[4769]: I1125 11:10:14.363772 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-856hd" podStartSLOduration=5.528405418 podStartE2EDuration="54.363747534s" podCreationTimestamp="2025-11-25 11:09:20 +0000 UTC" firstStartedPulling="2025-11-25 11:09:23.1632449 +0000 UTC m=+5111.748217213" lastFinishedPulling="2025-11-25 11:10:11.998587026 +0000 UTC m=+5160.583559329" observedRunningTime="2025-11-25 11:10:14.321785612 +0000 UTC m=+5162.906757925" watchObservedRunningTime="2025-11-25 11:10:14.363747534 +0000 UTC m=+5162.948719847"
Nov 25 11:10:15 crc kubenswrapper[4769]: I1125 11:10:15.230838 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-scheduler-0" podUID="18a4a142-bb1b-4e44-9110-6a6e15b86b0d" containerName="cinder-scheduler" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 11:10:15 crc kubenswrapper[4769]: I1125 11:10:15.244461 4769 scope.go:117] "RemoveContainer" containerID="4b939fc00d006c5c582da7cfcaed5ec1363b4453901fa7b7866cf6b4ff5b14f5"
Nov 25 11:10:15 crc kubenswrapper[4769]: E1125 11:10:15.248328 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256"
Nov 25 11:10:15 crc kubenswrapper[4769]: I1125 11:10:15.928496 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-59lwg"
Nov 25 11:10:16 crc kubenswrapper[4769]: I1125 11:10:16.162384 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/65d1fe44-2bb2-4d58-8bcf-7e15ab70164c-utilities\") pod \"65d1fe44-2bb2-4d58-8bcf-7e15ab70164c\" (UID: \"65d1fe44-2bb2-4d58-8bcf-7e15ab70164c\") "
Nov 25 11:10:16 crc kubenswrapper[4769]: I1125 11:10:16.162715 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/65d1fe44-2bb2-4d58-8bcf-7e15ab70164c-catalog-content\") pod \"65d1fe44-2bb2-4d58-8bcf-7e15ab70164c\" (UID: \"65d1fe44-2bb2-4d58-8bcf-7e15ab70164c\") "
Nov 25 11:10:16 crc kubenswrapper[4769]: I1125 11:10:16.162919 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6jfg8\" (UniqueName: \"kubernetes.io/projected/65d1fe44-2bb2-4d58-8bcf-7e15ab70164c-kube-api-access-6jfg8\") pod \"65d1fe44-2bb2-4d58-8bcf-7e15ab70164c\" (UID: \"65d1fe44-2bb2-4d58-8bcf-7e15ab70164c\") "
Nov 25 11:10:16 crc kubenswrapper[4769]: I1125 11:10:16.167261 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/65d1fe44-2bb2-4d58-8bcf-7e15ab70164c-utilities" (OuterVolumeSpecName: "utilities") pod "65d1fe44-2bb2-4d58-8bcf-7e15ab70164c" (UID: "65d1fe44-2bb2-4d58-8bcf-7e15ab70164c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 11:10:16 crc kubenswrapper[4769]: I1125 11:10:16.266286 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/65d1fe44-2bb2-4d58-8bcf-7e15ab70164c-kube-api-access-6jfg8" (OuterVolumeSpecName: "kube-api-access-6jfg8") pod "65d1fe44-2bb2-4d58-8bcf-7e15ab70164c" (UID: "65d1fe44-2bb2-4d58-8bcf-7e15ab70164c"). InnerVolumeSpecName "kube-api-access-6jfg8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 11:10:16 crc kubenswrapper[4769]: I1125 11:10:16.269242 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6jfg8\" (UniqueName: \"kubernetes.io/projected/65d1fe44-2bb2-4d58-8bcf-7e15ab70164c-kube-api-access-6jfg8\") on node \"crc\" DevicePath \"\""
Nov 25 11:10:16 crc kubenswrapper[4769]: I1125 11:10:16.276352 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/65d1fe44-2bb2-4d58-8bcf-7e15ab70164c-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 11:10:16 crc kubenswrapper[4769]: I1125 11:10:16.336567 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-c6g96"]
Nov 25 11:10:16 crc kubenswrapper[4769]: I1125 11:10:16.369878 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-59lwg" event={"ID":"65d1fe44-2bb2-4d58-8bcf-7e15ab70164c","Type":"ContainerDied","Data":"c9973f80bdb330b9aa618c8a1e0c1373807dcd679e372630c89497be947d1d91"}
Nov 25 11:10:16 crc kubenswrapper[4769]: I1125 11:10:16.369986 4769 scope.go:117] "RemoveContainer" containerID="76a315f23d8193ceea354b8a1239cea5998d44c943d0ca8e2897621caea918a9"
Nov 25 11:10:16 crc kubenswrapper[4769]: I1125 11:10:16.370184 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-59lwg"
Nov 25 11:10:16 crc kubenswrapper[4769]: I1125 11:10:16.503238 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/65d1fe44-2bb2-4d58-8bcf-7e15ab70164c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "65d1fe44-2bb2-4d58-8bcf-7e15ab70164c" (UID: "65d1fe44-2bb2-4d58-8bcf-7e15ab70164c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 11:10:16 crc kubenswrapper[4769]: E1125 11:10:16.512083 4769 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="7699fb5bf48b56d797190b3abce5b038db7167de6ff0dcfdf201bfe7e3850e8e" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"]
Nov 25 11:10:16 crc kubenswrapper[4769]: E1125 11:10:16.524091 4769 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="7699fb5bf48b56d797190b3abce5b038db7167de6ff0dcfdf201bfe7e3850e8e" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"]
Nov 25 11:10:16 crc kubenswrapper[4769]: E1125 11:10:16.529700 4769 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="7699fb5bf48b56d797190b3abce5b038db7167de6ff0dcfdf201bfe7e3850e8e" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"]
Nov 25 11:10:16 crc kubenswrapper[4769]: E1125 11:10:16.529775 4769 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="52438bf8-8800-4078-bc88-63033a83dd2e" containerName="galera"
Nov 25 11:10:16 crc kubenswrapper[4769]: I1125 11:10:16.548257 4769 scope.go:117] "RemoveContainer" containerID="4ffaa6f5d5b58fb9d51c7e199e37d73b9de720dc76f1d5a84234ed5086c3bfbe"
Nov 25 11:10:16 crc kubenswrapper[4769]: I1125 11:10:16.584727 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/65d1fe44-2bb2-4d58-8bcf-7e15ab70164c-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 11:10:16 crc kubenswrapper[4769]: I1125 11:10:16.659480 4769 scope.go:117] "RemoveContainer" containerID="c3f13af9127764c58d2fdbbe10049fc30f347ca528920d0e5f4a8ef419174def"
Nov 25 11:10:16 crc kubenswrapper[4769]: I1125 11:10:16.804016 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-59lwg"]
Nov 25 11:10:16 crc kubenswrapper[4769]: I1125 11:10:16.816017 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-59lwg"]
Nov 25 11:10:16 crc kubenswrapper[4769]: E1125 11:10:16.975244 4769 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod65d1fe44_2bb2_4d58_8bcf_7e15ab70164c.slice/crio-c9973f80bdb330b9aa618c8a1e0c1373807dcd679e372630c89497be947d1d91\": RecentStats: unable to find data in memory cache]"
Nov 25 11:10:17 crc kubenswrapper[4769]: I1125 11:10:17.396320 4769 generic.go:334] "Generic (PLEG): container finished" podID="b831a4b0-2508-4624-b8cb-833a8d5b10ad" containerID="cf737d64efee37e10188ee23890ddb22c0991065a33ac0bc92171731b9229042" exitCode=0
Nov 25 11:10:17 crc kubenswrapper[4769]: I1125 11:10:17.396670 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c6g96" event={"ID":"b831a4b0-2508-4624-b8cb-833a8d5b10ad","Type":"ContainerDied","Data":"cf737d64efee37e10188ee23890ddb22c0991065a33ac0bc92171731b9229042"}
Nov 25 11:10:17 crc kubenswrapper[4769]: I1125 11:10:17.396699 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c6g96" event={"ID":"b831a4b0-2508-4624-b8cb-833a8d5b10ad","Type":"ContainerStarted","Data":"6671f69131da140fc91ee06602c7b847c55d8657e59353faffc6e730f058ebc3"}
Nov 25 11:10:17 crc kubenswrapper[4769]: I1125 11:10:17.695819 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-scheduler-0" podUID="18a4a142-bb1b-4e44-9110-6a6e15b86b0d" containerName="cinder-scheduler" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 11:10:17 crc kubenswrapper[4769]: I1125 11:10:17.695921 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/cinder-scheduler-0"
Nov 25 11:10:17 crc kubenswrapper[4769]: I1125 11:10:17.696960 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="cinder-scheduler" containerStatusID={"Type":"cri-o","ID":"7efe33511fbdd50d518a9752921c8dc97ebbb6fbae7b5925d6b3de5b3cf3f76d"} pod="openstack/cinder-scheduler-0" containerMessage="Container cinder-scheduler failed liveness probe, will be restarted"
Nov 25 11:10:17 crc kubenswrapper[4769]: I1125 11:10:17.697035 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="18a4a142-bb1b-4e44-9110-6a6e15b86b0d" containerName="cinder-scheduler" containerID="cri-o://7efe33511fbdd50d518a9752921c8dc97ebbb6fbae7b5925d6b3de5b3cf3f76d" gracePeriod=30
Nov 25 11:10:18 crc kubenswrapper[4769]: I1125 11:10:18.016142 4769 trace.go:236] Trace[1879915080]: "Calculate volume metrics of storage for pod openshift-logging/logging-loki-index-gateway-0" (25-Nov-2025 11:10:16.783) (total time: 1227ms):
Nov 25 11:10:18 crc kubenswrapper[4769]: Trace[1879915080]: [1.227002325s] [1.227002325s] END
Nov 25 11:10:18 crc kubenswrapper[4769]: I1125 11:10:18.283494 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="65d1fe44-2bb2-4d58-8bcf-7e15ab70164c" path="/var/lib/kubelet/pods/65d1fe44-2bb2-4d58-8bcf-7e15ab70164c/volumes"
Nov 25 11:10:19 crc kubenswrapper[4769]: I1125 11:10:19.102526 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-9v54j"
Nov 25 11:10:19 crc kubenswrapper[4769]: I1125 11:10:19.102935 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-9v54j"
Nov 25 11:10:19 crc kubenswrapper[4769]: I1125 11:10:19.786364 4769 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="8e360825-f56f-4e69-9e17-c9e78f295267" containerName="galera" probeResult="failure" output="command timed out"
Nov 25 11:10:19 crc kubenswrapper[4769]: I1125 11:10:19.786456 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-cell1-galera-0" podUID="8e360825-f56f-4e69-9e17-c9e78f295267" containerName="galera" probeResult="failure" output="command timed out"
Nov 25 11:10:19 crc kubenswrapper[4769]: I1125 11:10:19.964007 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-gkx9w"
Nov 25 11:10:19 crc kubenswrapper[4769]: I1125 11:10:19.964112 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-gkx9w"
Nov 25 11:10:20 crc kubenswrapper[4769]: I1125 11:10:20.184780 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9v54j" podUID="bdd2d26d-cc13-41dd-9e57-fd8e81c627fe" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:10:20 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:10:20 crc kubenswrapper[4769]: >
Nov 25 11:10:20 crc kubenswrapper[4769]: I1125 11:10:20.449174 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-zwpnn" podUID="aa14b8e2-159e-4850-8816-14bc635838ac" containerName="frr" probeResult="failure" output="Get \"http://127.0.0.1:7573/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 25 11:10:21 crc kubenswrapper[4769]: I1125 11:10:21.019401 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gkx9w" podUID="f1b1fb95-30bd-457e-b59c-c06e6b770f42" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:10:21 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:10:21 crc kubenswrapper[4769]: >
Nov 25 11:10:21 crc kubenswrapper[4769]: I1125 11:10:21.184433 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-856hd"
Nov 25 11:10:21 crc kubenswrapper[4769]: I1125 11:10:21.184483 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-856hd"
Nov 25 11:10:22 crc kubenswrapper[4769]: I1125 11:10:22.236073 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-856hd" podUID="4a6225f9-cdc5-486a-b813-db81a752fade" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:10:22 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:10:22 crc kubenswrapper[4769]: >
Nov 25 11:10:22 crc kubenswrapper[4769]: I1125 11:10:22.435218 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-vr22m"
Nov 25 11:10:22 crc kubenswrapper[4769]: I1125 11:10:22.435267 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-vr22m"
Nov 25 11:10:22 crc kubenswrapper[4769]: I1125 11:10:22.476130 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c6g96" event={"ID":"b831a4b0-2508-4624-b8cb-833a8d5b10ad","Type":"ContainerStarted","Data":"4cdb4d6b888281d31d627252cabf5488591fdad0dec842d0140089deb3e6dd96"}
Nov 25 11:10:23 crc kubenswrapper[4769]: I1125 11:10:23.528504 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-vr22m" podUID="e9d04b78-f610-43ce-bc66-ca181acf6654" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:10:23 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:10:23 crc kubenswrapper[4769]: >
Nov 25 11:10:23 crc kubenswrapper[4769]: I1125 11:10:23.574855 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-h2wlq"
Nov 25 11:10:23 crc kubenswrapper[4769]: I1125 11:10:23.574917 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-h2wlq"
Nov 25 11:10:24 crc kubenswrapper[4769]: I1125 11:10:24.515292 4769 generic.go:334] "Generic (PLEG): container finished" podID="18a4a142-bb1b-4e44-9110-6a6e15b86b0d" containerID="7efe33511fbdd50d518a9752921c8dc97ebbb6fbae7b5925d6b3de5b3cf3f76d" exitCode=0
Nov 25 11:10:24 crc kubenswrapper[4769]: I1125 11:10:24.515381 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"18a4a142-bb1b-4e44-9110-6a6e15b86b0d","Type":"ContainerDied","Data":"7efe33511fbdd50d518a9752921c8dc97ebbb6fbae7b5925d6b3de5b3cf3f76d"}
Nov 25 11:10:24 crc kubenswrapper[4769]: I1125 11:10:24.633249 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-h2wlq" podUID="993bc795-fde2-4876-9e76-e97a6095576d" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:10:24 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:10:24 crc kubenswrapper[4769]: >
Nov 25 11:10:26 crc kubenswrapper[4769]: E1125 11:10:26.512464 4769 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="7699fb5bf48b56d797190b3abce5b038db7167de6ff0dcfdf201bfe7e3850e8e" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"]
Nov 25 11:10:26 crc kubenswrapper[4769]: E1125 11:10:26.515576 4769 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="7699fb5bf48b56d797190b3abce5b038db7167de6ff0dcfdf201bfe7e3850e8e" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"]
Nov 25 11:10:26 crc kubenswrapper[4769]: E1125 11:10:26.517073 4769 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="7699fb5bf48b56d797190b3abce5b038db7167de6ff0dcfdf201bfe7e3850e8e" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"]
Nov 25 11:10:26 crc kubenswrapper[4769]: E1125 11:10:26.517119 4769 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="52438bf8-8800-4078-bc88-63033a83dd2e" containerName="galera"
Nov 25 11:10:27 crc kubenswrapper[4769]: I1125 11:10:27.551099 4769 generic.go:334] "Generic (PLEG): container finished" podID="52438bf8-8800-4078-bc88-63033a83dd2e" containerID="7699fb5bf48b56d797190b3abce5b038db7167de6ff0dcfdf201bfe7e3850e8e" exitCode=0
Nov 25 11:10:27 crc kubenswrapper[4769]: I1125 11:10:27.551184 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"52438bf8-8800-4078-bc88-63033a83dd2e","Type":"ContainerDied","Data":"7699fb5bf48b56d797190b3abce5b038db7167de6ff0dcfdf201bfe7e3850e8e"}
Nov 25 11:10:27 crc kubenswrapper[4769]: I1125 11:10:27.551544 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"52438bf8-8800-4078-bc88-63033a83dd2e","Type":"ContainerStarted","Data":"4f66461077e569b7e3c706cb31aded0997045eddedc952c4a9810e617a94565e"}
Nov 25 11:10:27 crc kubenswrapper[4769]: I1125 11:10:27.554759 4769 generic.go:334] "Generic (PLEG): container finished" podID="7e334767-99a9-4a84-bcd7-e3e53d56750a" containerID="644c2c018e722cfbb1475b24513cb7a92abcf292944dbd52c593c76c72401d3d" exitCode=0
Nov 25 11:10:27 crc kubenswrapper[4769]: I1125 11:10:27.554833 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9hv66" event={"ID":"7e334767-99a9-4a84-bcd7-e3e53d56750a","Type":"ContainerDied","Data":"644c2c018e722cfbb1475b24513cb7a92abcf292944dbd52c593c76c72401d3d"}
Nov 25 11:10:29 crc kubenswrapper[4769]: I1125 11:10:29.580395 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9hv66" event={"ID":"7e334767-99a9-4a84-bcd7-e3e53d56750a","Type":"ContainerStarted","Data":"2a261c0d637945ef58e5165df0fc32835e944bf6bd07452f4b65407653066e62"}
Nov 25 11:10:29 crc kubenswrapper[4769]: I1125 11:10:29.608821 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-9hv66" podStartSLOduration=5.324543565 podStartE2EDuration="1m5.608796942s" podCreationTimestamp="2025-11-25 11:09:24 +0000 UTC" firstStartedPulling="2025-11-25 11:09:28.557311669 +0000 UTC m=+5117.142283982" lastFinishedPulling="2025-11-25 11:10:28.841565046 +0000 UTC m=+5177.426537359" observedRunningTime="2025-11-25 11:10:29.600169003 +0000 UTC m=+5178.185141346" watchObservedRunningTime="2025-11-25 11:10:29.608796942 +0000 UTC m=+5178.193769255"
Nov 25 11:10:30 crc kubenswrapper[4769]: I1125 11:10:30.168353 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9v54j" podUID="bdd2d26d-cc13-41dd-9e57-fd8e81c627fe" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:10:30 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:10:30 crc kubenswrapper[4769]: >
Nov 25 11:10:30 crc kubenswrapper[4769]: I1125 11:10:30.237245 4769 scope.go:117] "RemoveContainer" containerID="4b939fc00d006c5c582da7cfcaed5ec1363b4453901fa7b7866cf6b4ff5b14f5"
Nov 25 11:10:30 crc kubenswrapper[4769]: E1125 11:10:30.237533 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256"
Nov 25 11:10:31 crc kubenswrapper[4769]: I1125 11:10:31.019872 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gkx9w" podUID="f1b1fb95-30bd-457e-b59c-c06e6b770f42" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:10:31 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:10:31 crc kubenswrapper[4769]: >
Nov 25 11:10:32 crc kubenswrapper[4769]: I1125 11:10:32.834500 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-856hd" podUID="4a6225f9-cdc5-486a-b813-db81a752fade" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:10:32 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:10:32 crc kubenswrapper[4769]: >
Nov 25 11:10:33 crc kubenswrapper[4769]: I1125 11:10:33.528615 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-vr22m" podUID="e9d04b78-f610-43ce-bc66-ca181acf6654" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:10:33 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:10:33 crc kubenswrapper[4769]: >
Nov 25 11:10:35 crc kubenswrapper[4769]: I1125 11:10:35.176203 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-9hv66"
Nov 25 11:10:35 crc kubenswrapper[4769]: I1125 11:10:35.176546 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-9hv66"
Nov 25 11:10:35 crc kubenswrapper[4769]: I1125 11:10:35.242883 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-h2wlq" podUID="993bc795-fde2-4876-9e76-e97a6095576d" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:10:35 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:10:35 crc kubenswrapper[4769]: >
Nov 25 11:10:35 crc kubenswrapper[4769]: I1125 11:10:35.651251 4769 generic.go:334] "Generic (PLEG): container finished" podID="b831a4b0-2508-4624-b8cb-833a8d5b10ad" containerID="4cdb4d6b888281d31d627252cabf5488591fdad0dec842d0140089deb3e6dd96" exitCode=0
Nov 25 11:10:35 crc kubenswrapper[4769]: I1125 11:10:35.651302 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c6g96" event={"ID":"b831a4b0-2508-4624-b8cb-833a8d5b10ad","Type":"ContainerDied","Data":"4cdb4d6b888281d31d627252cabf5488591fdad0dec842d0140089deb3e6dd96"}
Nov 25 11:10:35 crc kubenswrapper[4769]: I1125 11:10:35.653370 4769 generic.go:334] "Generic (PLEG): container finished" podID="71135c8c-e27a-4ae0-9e15-afe6164def89" containerID="bf85cbef29e4aeee6b319ac8803fce5bef0973e197737b718e1b27b03a082476" exitCode=0
Nov 25 11:10:35 crc kubenswrapper[4769]: I1125 11:10:35.653454 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pq2q8" event={"ID":"71135c8c-e27a-4ae0-9e15-afe6164def89","Type":"ContainerDied","Data":"bf85cbef29e4aeee6b319ac8803fce5bef0973e197737b718e1b27b03a082476"}
Nov 25 11:10:35 crc kubenswrapper[4769]: I1125 11:10:35.657328 4769 generic.go:334] "Generic (PLEG): container finished" podID="e588820f-3563-4b98-9ba4-85da1cee2821" containerID="3e8eada057b1d52cc40fd819d5f756a11eaf48ff6396c948a0bcd5bf300e3898" exitCode=0
Nov 25 11:10:35 crc kubenswrapper[4769]: I1125 11:10:35.657392 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gwvnd" event={"ID":"e588820f-3563-4b98-9ba4-85da1cee2821","Type":"ContainerDied","Data":"3e8eada057b1d52cc40fd819d5f756a11eaf48ff6396c948a0bcd5bf300e3898"}
Nov 25 11:10:35 crc kubenswrapper[4769]: I1125 11:10:35.668182 4769 generic.go:334] "Generic (PLEG): container finished" podID="af5c9899-c379-4d60-9aae-979a20de2ad2" containerID="633cd28ec7fb44b95489c8af0cfbcffaf11ffc0e9c1961b65114bcbe0841dba6" exitCode=0
Nov 25 11:10:35 crc kubenswrapper[4769]: I1125 11:10:35.668236 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h57kt" event={"ID":"af5c9899-c379-4d60-9aae-979a20de2ad2","Type":"ContainerDied","Data":"633cd28ec7fb44b95489c8af0cfbcffaf11ffc0e9c1961b65114bcbe0841dba6"}
Nov 25 11:10:35 crc kubenswrapper[4769]: I1125 11:10:35.674544 4769 generic.go:334] "Generic (PLEG): container finished" podID="36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24" containerID="151df6f7f41c3828ab34928d0805938c6a65452bacc249ceaa2784140a1136a7" exitCode=0
Nov 25 11:10:35 crc kubenswrapper[4769]: I1125 11:10:35.674592 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ccknj" event={"ID":"36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24","Type":"ContainerDied","Data":"151df6f7f41c3828ab34928d0805938c6a65452bacc249ceaa2784140a1136a7"}
Nov 25 11:10:36 crc kubenswrapper[4769]: I1125 11:10:36.235203 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9hv66" podUID="7e334767-99a9-4a84-bcd7-e3e53d56750a" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:10:36 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:10:36 crc kubenswrapper[4769]: >
Nov 25 11:10:36 crc kubenswrapper[4769]: I1125 11:10:36.510692 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0"
Nov 25 11:10:36 crc kubenswrapper[4769]: I1125 11:10:36.510738 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0"
Nov 25 11:10:37 crc kubenswrapper[4769]: I1125 11:10:37.117776 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0"
Nov 25 11:10:37 crc kubenswrapper[4769]: I1125 11:10:37.218954 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0"
Nov 25 11:10:37 crc kubenswrapper[4769]: I1125 11:10:37.701540 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ccknj" event={"ID":"36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24","Type":"ContainerStarted","Data":"06b1191036f240719dbe9a83b3904364f6e7209c7ca666c6105627d6be466272"}
Nov 25 11:10:37 crc kubenswrapper[4769]: I1125 11:10:37.704438 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c6g96" event={"ID":"b831a4b0-2508-4624-b8cb-833a8d5b10ad","Type":"ContainerStarted","Data":"eda0ea0f825c571e9753ca1ba01c953cacbc67c2654df4dde4d29e6472a0522c"}
Nov 25 11:10:37 crc kubenswrapper[4769]: I1125 11:10:37.707934 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pq2q8" event={"ID":"71135c8c-e27a-4ae0-9e15-afe6164def89","Type":"ContainerStarted","Data":"169354e3899914d79722f328dea57dfd7c4398163494f6544ce032cba9e0b620"}
Nov 25 11:10:37 crc kubenswrapper[4769]: I1125 11:10:37.710117 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gwvnd" event={"ID":"e588820f-3563-4b98-9ba4-85da1cee2821","Type":"ContainerStarted","Data":"88daf248b5618ddfa14959abc53a3a1d3536978e0cd35c21a95568586424539e"}
Nov 25 11:10:37 crc kubenswrapper[4769]: I1125 11:10:37.713381 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"18a4a142-bb1b-4e44-9110-6a6e15b86b0d","Type":"ContainerStarted","Data":"87633ca5bbdb3e8dd2d59b7ecedf2f3221038cecaff74172c7ab1d064f61ecdf"}
Nov 25 11:10:37 crc kubenswrapper[4769]: I1125 11:10:37.719891 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h57kt" event={"ID":"af5c9899-c379-4d60-9aae-979a20de2ad2","Type":"ContainerStarted","Data":"4ba6f479383af1853419c0467f8b024327a29a7aa073259cad5c62f4e347f088"}
Nov 25 11:10:37 crc kubenswrapper[4769]: I1125 11:10:37.741199 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-ccknj" podStartSLOduration=16.662219012 podStartE2EDuration="1m6.741181403s" podCreationTimestamp="2025-11-25 11:09:31 +0000 UTC" firstStartedPulling="2025-11-25 11:09:46.779326463 +0000 UTC m=+5135.364298776" lastFinishedPulling="2025-11-25 11:10:36.858288854 +0000 UTC m=+5185.443261167" observedRunningTime="2025-11-25 11:10:37.724874491 +0000 UTC m=+5186.309846804" watchObservedRunningTime="2025-11-25 11:10:37.741181403 +0000 UTC m=+5186.326153736"
Nov 25 11:10:37 crc kubenswrapper[4769]: I1125 11:10:37.755373 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-c6g96" podStartSLOduration=15.825494131 podStartE2EDuration="35.755358619s" podCreationTimestamp="2025-11-25 11:10:02 +0000 UTC" firstStartedPulling="2025-11-25 11:10:17.40118686 +0000 UTC m=+5165.986159173" lastFinishedPulling="2025-11-25 11:10:37.331051348 +0000 UTC m=+5185.916023661" observedRunningTime="2025-11-25 11:10:37.744341117 +0000 UTC m=+5186.329313430" watchObservedRunningTime="2025-11-25 11:10:37.755358619 +0000 UTC m=+5186.340330932"
Nov 25 11:10:37 crc kubenswrapper[4769]: I1125 11:10:37.774473 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-gwvnd" podStartSLOduration=18.769810743 podStartE2EDuration="1m8.774454055s" podCreationTimestamp="2025-11-25 11:09:29 +0000 UTC" firstStartedPulling="2025-11-25 11:09:46.781751067 +0000 UTC m=+5135.366723380" lastFinishedPulling="2025-11-25 11:10:36.786394379 +0000 UTC m=+5185.371366692" observedRunningTime="2025-11-25 11:10:37.763572346 +0000 UTC m=+5186.348544659" watchObservedRunningTime="2025-11-25 11:10:37.774454055 +0000 UTC m=+5186.359426368"
Nov 25 11:10:37 crc kubenswrapper[4769]: I1125 11:10:37.787656 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-pq2q8" podStartSLOduration=15.501622338 podStartE2EDuration="1m5.787640584s" podCreationTimestamp="2025-11-25 11:09:32 +0000 UTC" firstStartedPulling="2025-11-25 11:09:46.779658532 +0000 UTC m=+5135.364630845" lastFinishedPulling="2025-11-25 11:10:37.065676778 +0000 UTC m=+5185.650649091" observedRunningTime="2025-11-25 11:10:37.786977266 +0000 UTC m=+5186.371949579" watchObservedRunningTime="2025-11-25 11:10:37.787640584 +0000 UTC m=+5186.372612897"
Nov 25 11:10:37 crc kubenswrapper[4769]: I1125 11:10:37.876380 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-h57kt" podStartSLOduration=17.704597525 podStartE2EDuration="1m7.876361314s" podCreationTimestamp="2025-11-25 11:09:30 +0000 UTC" firstStartedPulling="2025-11-25 11:09:46.777550876 +0000 UTC m=+5135.362523199" lastFinishedPulling="2025-11-25 11:10:36.949314665 +0000 UTC m=+5185.534286988" observedRunningTime="2025-11-25 11:10:37.828365713 +0000 UTC m=+5186.413338026" watchObservedRunningTime="2025-11-25 11:10:37.876361314 +0000 UTC m=+5186.461333627"
Nov 25 11:10:39 crc kubenswrapper[4769]: I1125 11:10:39.884138 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-gwvnd"
Nov 25 11:10:39 crc kubenswrapper[4769]: I1125 11:10:39.884602 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-gwvnd"
Nov 25 11:10:40 crc kubenswrapper[4769]: I1125 11:10:40.179732 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9v54j" podUID="bdd2d26d-cc13-41dd-9e57-fd8e81c627fe" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:10:40 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:10:40 crc kubenswrapper[4769]: >
Nov 25 11:10:40 crc kubenswrapper[4769]: I1125 11:10:40.946790 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gwvnd" podUID="e588820f-3563-4b98-9ba4-85da1cee2821" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:10:40 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:10:40 crc kubenswrapper[4769]: >
Nov 25 11:10:41 crc kubenswrapper[4769]: I1125 11:10:41.045469 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gkx9w" podUID="f1b1fb95-30bd-457e-b59c-c06e6b770f42" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:10:41 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:10:41 crc kubenswrapper[4769]: >
Nov 25 11:10:41 crc kubenswrapper[4769]: I1125 11:10:41.684166 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-h57kt"
Nov 25 11:10:41 crc kubenswrapper[4769]: I1125 11:10:41.684220 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-h57kt"
Nov 25 11:10:42 crc kubenswrapper[4769]: I1125 11:10:42.018773 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-ccknj"
Nov 25 11:10:42 crc kubenswrapper[4769]: I1125 11:10:42.020716 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-ccknj"
Nov 25 11:10:42 crc kubenswrapper[4769]: I1125 11:10:42.253548 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-856hd" podUID="4a6225f9-cdc5-486a-b813-db81a752fade" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:10:42 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:10:42 crc kubenswrapper[4769]: >
Nov 25 11:10:42 crc kubenswrapper[4769]: I1125 11:10:42.608620 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0"
Nov 25 11:10:42 crc kubenswrapper[4769]: I1125 11:10:42.656106 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0"
Nov 25 11:10:42 crc kubenswrapper[4769]: I1125 11:10:42.747738 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-h57kt" podUID="af5c9899-c379-4d60-9aae-979a20de2ad2" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:10:42 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:10:42 crc kubenswrapper[4769]: >
Nov 25 11:10:43 crc kubenswrapper[4769]: I1125 11:10:43.114792 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-ccknj" podUID="36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:10:43 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:10:43 crc kubenswrapper[4769]: >
Nov 25 11:10:43 crc kubenswrapper[4769]: I1125 11:10:43.156219 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-pq2q8"
Nov 25 11:10:43 crc kubenswrapper[4769]: I1125 11:10:43.157445 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-pq2q8"
Nov 25 11:10:43 crc kubenswrapper[4769]: I1125 11:10:43.501795 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-vr22m" podUID="e9d04b78-f610-43ce-bc66-ca181acf6654" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:10:43 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:10:43 crc kubenswrapper[4769]: >
Nov 25 11:10:44 crc kubenswrapper[4769]: I1125 11:10:44.133341 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-c6g96"
Nov 25 11:10:44 crc kubenswrapper[4769]: I1125 11:10:44.133717 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-c6g96"
Nov 25 11:10:44 crc kubenswrapper[4769]: I1125 11:10:44.257577 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-pq2q8" podUID="71135c8c-e27a-4ae0-9e15-afe6164def89" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:10:44 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:10:44 crc kubenswrapper[4769]: >
Nov 25 11:10:45 crc kubenswrapper[4769]: I1125 11:10:45.201001 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-c6g96" podUID="b831a4b0-2508-4624-b8cb-833a8d5b10ad" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:10:45 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:10:45 crc kubenswrapper[4769]: >
Nov 25 11:10:45 crc kubenswrapper[4769]: I1125 11:10:45.206488 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-h2wlq" podUID="993bc795-fde2-4876-9e76-e97a6095576d" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:10:45 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:10:45 crc kubenswrapper[4769]: >
Nov 25 11:10:45 crc kubenswrapper[4769]: I1125 11:10:45.237212 4769 scope.go:117] "RemoveContainer" containerID="4b939fc00d006c5c582da7cfcaed5ec1363b4453901fa7b7866cf6b4ff5b14f5"
Nov 25 11:10:45 crc kubenswrapper[4769]: E1125 11:10:45.237642 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256"
Nov 25 11:10:46 crc kubenswrapper[4769]: I1125 11:10:46.228771 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9hv66"
podUID="7e334767-99a9-4a84-bcd7-e3e53d56750a" containerName="registry-server" probeResult="failure" output=< Nov 25 11:10:46 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:10:46 crc kubenswrapper[4769]: > Nov 25 11:10:50 crc kubenswrapper[4769]: I1125 11:10:50.152887 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9v54j" podUID="bdd2d26d-cc13-41dd-9e57-fd8e81c627fe" containerName="registry-server" probeResult="failure" output=< Nov 25 11:10:50 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:10:50 crc kubenswrapper[4769]: > Nov 25 11:10:50 crc kubenswrapper[4769]: I1125 11:10:50.946545 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gwvnd" podUID="e588820f-3563-4b98-9ba4-85da1cee2821" containerName="registry-server" probeResult="failure" output=< Nov 25 11:10:50 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:10:50 crc kubenswrapper[4769]: > Nov 25 11:10:51 crc kubenswrapper[4769]: I1125 11:10:51.042554 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gkx9w" podUID="f1b1fb95-30bd-457e-b59c-c06e6b770f42" containerName="registry-server" probeResult="failure" output=< Nov 25 11:10:51 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:10:51 crc kubenswrapper[4769]: > Nov 25 11:10:52 crc kubenswrapper[4769]: I1125 11:10:52.254406 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-856hd" podUID="4a6225f9-cdc5-486a-b813-db81a752fade" containerName="registry-server" probeResult="failure" output=< Nov 25 11:10:52 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:10:52 crc kubenswrapper[4769]: > Nov 25 11:10:52 crc kubenswrapper[4769]: I1125 11:10:52.737078 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-h57kt" podUID="af5c9899-c379-4d60-9aae-979a20de2ad2" containerName="registry-server" probeResult="failure" output=< Nov 25 11:10:52 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:10:52 crc kubenswrapper[4769]: > Nov 25 11:10:53 crc kubenswrapper[4769]: I1125 11:10:53.071504 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-ccknj" podUID="36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24" containerName="registry-server" probeResult="failure" output=< Nov 25 11:10:53 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:10:53 crc kubenswrapper[4769]: > Nov 25 11:10:53 crc kubenswrapper[4769]: I1125 11:10:53.498068 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-vr22m" podUID="e9d04b78-f610-43ce-bc66-ca181acf6654" containerName="registry-server" probeResult="failure" output=< Nov 25 11:10:53 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:10:53 crc kubenswrapper[4769]: > Nov 25 11:10:54 crc kubenswrapper[4769]: I1125 11:10:54.218629 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-pq2q8" podUID="71135c8c-e27a-4ae0-9e15-afe6164def89" containerName="registry-server" probeResult="failure" output=< Nov 25 11:10:54 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:10:54 crc 
kubenswrapper[4769]: > Nov 25 11:10:54 crc kubenswrapper[4769]: I1125 11:10:54.631491 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-h2wlq" podUID="993bc795-fde2-4876-9e76-e97a6095576d" containerName="registry-server" probeResult="failure" output=< Nov 25 11:10:54 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:10:54 crc kubenswrapper[4769]: > Nov 25 11:10:55 crc kubenswrapper[4769]: I1125 11:10:55.187428 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-c6g96" podUID="b831a4b0-2508-4624-b8cb-833a8d5b10ad" containerName="registry-server" probeResult="failure" output=< Nov 25 11:10:55 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:10:55 crc kubenswrapper[4769]: > Nov 25 11:10:56 crc kubenswrapper[4769]: I1125 11:10:56.236998 4769 scope.go:117] "RemoveContainer" containerID="4b939fc00d006c5c582da7cfcaed5ec1363b4453901fa7b7866cf6b4ff5b14f5" Nov 25 11:10:56 crc kubenswrapper[4769]: E1125 11:10:56.237405 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 11:10:56 crc kubenswrapper[4769]: I1125 11:10:56.243340 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9hv66" podUID="7e334767-99a9-4a84-bcd7-e3e53d56750a" containerName="registry-server" probeResult="failure" output=< Nov 25 11:10:56 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:10:56 crc kubenswrapper[4769]: > Nov 25 11:11:00 crc kubenswrapper[4769]: I1125 11:11:00.165636 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9v54j" podUID="bdd2d26d-cc13-41dd-9e57-fd8e81c627fe" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:00 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:00 crc kubenswrapper[4769]: > Nov 25 11:11:00 crc kubenswrapper[4769]: I1125 11:11:00.935111 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gwvnd" podUID="e588820f-3563-4b98-9ba4-85da1cee2821" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:00 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:00 crc kubenswrapper[4769]: > Nov 25 11:11:01 crc kubenswrapper[4769]: I1125 11:11:01.017234 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gkx9w" podUID="f1b1fb95-30bd-457e-b59c-c06e6b770f42" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:01 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:01 crc kubenswrapper[4769]: > Nov 25 11:11:02 crc kubenswrapper[4769]: I1125 11:11:02.239890 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-856hd" podUID="4a6225f9-cdc5-486a-b813-db81a752fade" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:02 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" 
within 1s Nov 25 11:11:02 crc kubenswrapper[4769]: > Nov 25 11:11:02 crc kubenswrapper[4769]: I1125 11:11:02.741387 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-h57kt" podUID="af5c9899-c379-4d60-9aae-979a20de2ad2" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:02 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:02 crc kubenswrapper[4769]: > Nov 25 11:11:03 crc kubenswrapper[4769]: I1125 11:11:03.068632 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-ccknj" podUID="36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:03 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:03 crc kubenswrapper[4769]: > Nov 25 11:11:03 crc kubenswrapper[4769]: I1125 11:11:03.505943 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-vr22m" podUID="e9d04b78-f610-43ce-bc66-ca181acf6654" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:03 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:03 crc kubenswrapper[4769]: > Nov 25 11:11:04 crc kubenswrapper[4769]: I1125 11:11:04.209370 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-pq2q8" podUID="71135c8c-e27a-4ae0-9e15-afe6164def89" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:04 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:04 crc kubenswrapper[4769]: > Nov 25 11:11:04 crc kubenswrapper[4769]: I1125 11:11:04.639293 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-h2wlq" podUID="993bc795-fde2-4876-9e76-e97a6095576d" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:04 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:04 crc kubenswrapper[4769]: > Nov 25 11:11:05 crc kubenswrapper[4769]: I1125 11:11:05.188507 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-c6g96" podUID="b831a4b0-2508-4624-b8cb-833a8d5b10ad" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:05 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:05 crc kubenswrapper[4769]: > Nov 25 11:11:06 crc kubenswrapper[4769]: I1125 11:11:06.231937 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9hv66" podUID="7e334767-99a9-4a84-bcd7-e3e53d56750a" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:06 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:06 crc kubenswrapper[4769]: > Nov 25 11:11:10 crc kubenswrapper[4769]: I1125 11:11:10.163776 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9v54j" podUID="bdd2d26d-cc13-41dd-9e57-fd8e81c627fe" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:10 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:10 crc kubenswrapper[4769]: > Nov 25 11:11:10 crc kubenswrapper[4769]: I1125 11:11:10.237740 4769 scope.go:117] "RemoveContainer" 
containerID="4b939fc00d006c5c582da7cfcaed5ec1363b4453901fa7b7866cf6b4ff5b14f5" Nov 25 11:11:10 crc kubenswrapper[4769]: E1125 11:11:10.238288 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 11:11:11 crc kubenswrapper[4769]: I1125 11:11:11.430358 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gwvnd" podUID="e588820f-3563-4b98-9ba4-85da1cee2821" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:11 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:11 crc kubenswrapper[4769]: > Nov 25 11:11:11 crc kubenswrapper[4769]: I1125 11:11:11.431286 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gkx9w" podUID="f1b1fb95-30bd-457e-b59c-c06e6b770f42" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:11 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:11 crc kubenswrapper[4769]: > Nov 25 11:11:12 crc kubenswrapper[4769]: I1125 11:11:12.252636 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-856hd" podUID="4a6225f9-cdc5-486a-b813-db81a752fade" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:12 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:12 crc kubenswrapper[4769]: > Nov 25 11:11:12 crc kubenswrapper[4769]: I1125 11:11:12.736800 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-h57kt" podUID="af5c9899-c379-4d60-9aae-979a20de2ad2" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:12 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:12 crc kubenswrapper[4769]: > Nov 25 11:11:13 crc kubenswrapper[4769]: I1125 11:11:13.074029 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-ccknj" podUID="36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:13 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:13 crc kubenswrapper[4769]: > Nov 25 11:11:13 crc kubenswrapper[4769]: I1125 11:11:13.495497 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-vr22m" podUID="e9d04b78-f610-43ce-bc66-ca181acf6654" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:13 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:13 crc kubenswrapper[4769]: > Nov 25 11:11:14 crc kubenswrapper[4769]: I1125 11:11:14.207948 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-pq2q8" podUID="71135c8c-e27a-4ae0-9e15-afe6164def89" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:14 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:14 crc kubenswrapper[4769]: > Nov 25 11:11:14 crc kubenswrapper[4769]: I1125 11:11:14.627287 4769 
prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-h2wlq" podUID="993bc795-fde2-4876-9e76-e97a6095576d" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:14 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:14 crc kubenswrapper[4769]: > Nov 25 11:11:15 crc kubenswrapper[4769]: I1125 11:11:15.187581 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-c6g96" podUID="b831a4b0-2508-4624-b8cb-833a8d5b10ad" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:15 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:15 crc kubenswrapper[4769]: > Nov 25 11:11:16 crc kubenswrapper[4769]: I1125 11:11:16.248667 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9hv66" podUID="7e334767-99a9-4a84-bcd7-e3e53d56750a" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:16 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:16 crc kubenswrapper[4769]: > Nov 25 11:11:20 crc kubenswrapper[4769]: I1125 11:11:20.161808 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9v54j" podUID="bdd2d26d-cc13-41dd-9e57-fd8e81c627fe" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:20 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:20 crc kubenswrapper[4769]: > Nov 25 11:11:20 crc kubenswrapper[4769]: I1125 11:11:20.936454 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gwvnd" podUID="e588820f-3563-4b98-9ba4-85da1cee2821" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:20 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:20 crc kubenswrapper[4769]: > Nov 25 11:11:21 crc kubenswrapper[4769]: I1125 11:11:21.010598 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gkx9w" podUID="f1b1fb95-30bd-457e-b59c-c06e6b770f42" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:21 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:21 crc kubenswrapper[4769]: > Nov 25 11:11:22 crc kubenswrapper[4769]: I1125 11:11:22.247580 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-856hd" podUID="4a6225f9-cdc5-486a-b813-db81a752fade" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:22 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:22 crc kubenswrapper[4769]: > Nov 25 11:11:22 crc kubenswrapper[4769]: I1125 11:11:22.731492 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-h57kt" podUID="af5c9899-c379-4d60-9aae-979a20de2ad2" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:22 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:22 crc kubenswrapper[4769]: > Nov 25 11:11:23 crc kubenswrapper[4769]: I1125 11:11:23.071697 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-ccknj" podUID="36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:23 
crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:23 crc kubenswrapper[4769]: > Nov 25 11:11:24 crc kubenswrapper[4769]: I1125 11:11:24.033582 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-vr22m" podUID="e9d04b78-f610-43ce-bc66-ca181acf6654" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:24 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:24 crc kubenswrapper[4769]: > Nov 25 11:11:24 crc kubenswrapper[4769]: I1125 11:11:24.219185 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-pq2q8" podUID="71135c8c-e27a-4ae0-9e15-afe6164def89" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:24 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:24 crc kubenswrapper[4769]: > Nov 25 11:11:24 crc kubenswrapper[4769]: I1125 11:11:24.237986 4769 scope.go:117] "RemoveContainer" containerID="4b939fc00d006c5c582da7cfcaed5ec1363b4453901fa7b7866cf6b4ff5b14f5" Nov 25 11:11:24 crc kubenswrapper[4769]: E1125 11:11:24.238250 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 11:11:24 crc kubenswrapper[4769]: I1125 11:11:24.644407 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-h2wlq" podUID="993bc795-fde2-4876-9e76-e97a6095576d" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:24 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:24 crc kubenswrapper[4769]: > Nov 25 11:11:25 crc kubenswrapper[4769]: I1125 11:11:25.185264 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-c6g96" podUID="b831a4b0-2508-4624-b8cb-833a8d5b10ad" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:25 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:25 crc kubenswrapper[4769]: > Nov 25 11:11:26 crc kubenswrapper[4769]: I1125 11:11:26.224955 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9hv66" podUID="7e334767-99a9-4a84-bcd7-e3e53d56750a" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:26 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:26 crc kubenswrapper[4769]: > Nov 25 11:11:30 crc kubenswrapper[4769]: I1125 11:11:30.176507 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9v54j" podUID="bdd2d26d-cc13-41dd-9e57-fd8e81c627fe" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:30 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:30 crc kubenswrapper[4769]: > Nov 25 11:11:30 crc kubenswrapper[4769]: I1125 11:11:30.934078 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gwvnd" podUID="e588820f-3563-4b98-9ba4-85da1cee2821" containerName="registry-server" 
probeResult="failure" output=< Nov 25 11:11:30 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:30 crc kubenswrapper[4769]: > Nov 25 11:11:31 crc kubenswrapper[4769]: I1125 11:11:31.011810 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gkx9w" podUID="f1b1fb95-30bd-457e-b59c-c06e6b770f42" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:31 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:31 crc kubenswrapper[4769]: > Nov 25 11:11:32 crc kubenswrapper[4769]: I1125 11:11:32.234937 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-856hd" podUID="4a6225f9-cdc5-486a-b813-db81a752fade" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:32 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:32 crc kubenswrapper[4769]: > Nov 25 11:11:32 crc kubenswrapper[4769]: I1125 11:11:32.732574 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-h57kt" podUID="af5c9899-c379-4d60-9aae-979a20de2ad2" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:32 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:32 crc kubenswrapper[4769]: > Nov 25 11:11:33 crc kubenswrapper[4769]: I1125 11:11:33.073798 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-ccknj" podUID="36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:33 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:33 crc kubenswrapper[4769]: > Nov 25 11:11:33 crc kubenswrapper[4769]: I1125 11:11:33.483007 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-vr22m" podUID="e9d04b78-f610-43ce-bc66-ca181acf6654" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:33 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:33 crc kubenswrapper[4769]: > Nov 25 11:11:34 crc kubenswrapper[4769]: I1125 11:11:34.733523 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-h2wlq" podUID="993bc795-fde2-4876-9e76-e97a6095576d" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:34 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:34 crc kubenswrapper[4769]: > Nov 25 11:11:34 crc kubenswrapper[4769]: I1125 11:11:34.739026 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-pq2q8" podUID="71135c8c-e27a-4ae0-9e15-afe6164def89" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:34 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:34 crc kubenswrapper[4769]: > Nov 25 11:11:35 crc kubenswrapper[4769]: I1125 11:11:35.185098 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-c6g96" podUID="b831a4b0-2508-4624-b8cb-833a8d5b10ad" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:35 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:35 crc kubenswrapper[4769]: > Nov 25 11:11:36 crc kubenswrapper[4769]: I1125 
11:11:36.228881 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9hv66" podUID="7e334767-99a9-4a84-bcd7-e3e53d56750a" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:36 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:36 crc kubenswrapper[4769]: > Nov 25 11:11:36 crc kubenswrapper[4769]: I1125 11:11:36.268111 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-46zxd"] Nov 25 11:11:36 crc kubenswrapper[4769]: E1125 11:11:36.275186 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65d1fe44-2bb2-4d58-8bcf-7e15ab70164c" containerName="extract-utilities" Nov 25 11:11:36 crc kubenswrapper[4769]: I1125 11:11:36.275240 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="65d1fe44-2bb2-4d58-8bcf-7e15ab70164c" containerName="extract-utilities" Nov 25 11:11:36 crc kubenswrapper[4769]: E1125 11:11:36.275601 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65d1fe44-2bb2-4d58-8bcf-7e15ab70164c" containerName="registry-server" Nov 25 11:11:36 crc kubenswrapper[4769]: I1125 11:11:36.275613 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="65d1fe44-2bb2-4d58-8bcf-7e15ab70164c" containerName="registry-server" Nov 25 11:11:36 crc kubenswrapper[4769]: E1125 11:11:36.275664 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65d1fe44-2bb2-4d58-8bcf-7e15ab70164c" containerName="extract-content" Nov 25 11:11:36 crc kubenswrapper[4769]: I1125 11:11:36.275676 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="65d1fe44-2bb2-4d58-8bcf-7e15ab70164c" containerName="extract-content" Nov 25 11:11:36 crc kubenswrapper[4769]: I1125 11:11:36.277954 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="65d1fe44-2bb2-4d58-8bcf-7e15ab70164c" containerName="registry-server" Nov 25 11:11:36 crc kubenswrapper[4769]: I1125 11:11:36.285739 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-46zxd" Nov 25 11:11:36 crc kubenswrapper[4769]: I1125 11:11:36.379529 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/db9dc5e0-3558-4ce3-bc87-611a67d7e949-utilities\") pod \"certified-operators-46zxd\" (UID: \"db9dc5e0-3558-4ce3-bc87-611a67d7e949\") " pod="openshift-marketplace/certified-operators-46zxd" Nov 25 11:11:36 crc kubenswrapper[4769]: I1125 11:11:36.379742 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x5q7m\" (UniqueName: \"kubernetes.io/projected/db9dc5e0-3558-4ce3-bc87-611a67d7e949-kube-api-access-x5q7m\") pod \"certified-operators-46zxd\" (UID: \"db9dc5e0-3558-4ce3-bc87-611a67d7e949\") " pod="openshift-marketplace/certified-operators-46zxd" Nov 25 11:11:36 crc kubenswrapper[4769]: I1125 11:11:36.379877 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/db9dc5e0-3558-4ce3-bc87-611a67d7e949-catalog-content\") pod \"certified-operators-46zxd\" (UID: \"db9dc5e0-3558-4ce3-bc87-611a67d7e949\") " pod="openshift-marketplace/certified-operators-46zxd" Nov 25 11:11:36 crc kubenswrapper[4769]: I1125 11:11:36.464167 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-46zxd"] Nov 25 11:11:36 crc kubenswrapper[4769]: I1125 11:11:36.483909 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/db9dc5e0-3558-4ce3-bc87-611a67d7e949-utilities\") pod \"certified-operators-46zxd\" (UID: \"db9dc5e0-3558-4ce3-bc87-611a67d7e949\") " pod="openshift-marketplace/certified-operators-46zxd" Nov 25 11:11:36 crc kubenswrapper[4769]: I1125 11:11:36.484576 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x5q7m\" (UniqueName: \"kubernetes.io/projected/db9dc5e0-3558-4ce3-bc87-611a67d7e949-kube-api-access-x5q7m\") pod \"certified-operators-46zxd\" (UID: \"db9dc5e0-3558-4ce3-bc87-611a67d7e949\") " pod="openshift-marketplace/certified-operators-46zxd" Nov 25 11:11:36 crc kubenswrapper[4769]: I1125 11:11:36.484655 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/db9dc5e0-3558-4ce3-bc87-611a67d7e949-catalog-content\") pod \"certified-operators-46zxd\" (UID: \"db9dc5e0-3558-4ce3-bc87-611a67d7e949\") " pod="openshift-marketplace/certified-operators-46zxd" Nov 25 11:11:36 crc kubenswrapper[4769]: I1125 11:11:36.487275 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/db9dc5e0-3558-4ce3-bc87-611a67d7e949-catalog-content\") pod \"certified-operators-46zxd\" (UID: \"db9dc5e0-3558-4ce3-bc87-611a67d7e949\") " pod="openshift-marketplace/certified-operators-46zxd" Nov 25 11:11:36 crc kubenswrapper[4769]: I1125 11:11:36.488116 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/db9dc5e0-3558-4ce3-bc87-611a67d7e949-utilities\") pod \"certified-operators-46zxd\" (UID: \"db9dc5e0-3558-4ce3-bc87-611a67d7e949\") " pod="openshift-marketplace/certified-operators-46zxd" Nov 25 11:11:36 crc kubenswrapper[4769]: I1125 11:11:36.529861 4769 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-x5q7m\" (UniqueName: \"kubernetes.io/projected/db9dc5e0-3558-4ce3-bc87-611a67d7e949-kube-api-access-x5q7m\") pod \"certified-operators-46zxd\" (UID: \"db9dc5e0-3558-4ce3-bc87-611a67d7e949\") " pod="openshift-marketplace/certified-operators-46zxd" Nov 25 11:11:36 crc kubenswrapper[4769]: I1125 11:11:36.630494 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-46zxd" Nov 25 11:11:39 crc kubenswrapper[4769]: I1125 11:11:39.078511 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-46zxd"] Nov 25 11:11:39 crc kubenswrapper[4769]: W1125 11:11:39.107107 4769 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddb9dc5e0_3558_4ce3_bc87_611a67d7e949.slice/crio-50086a5814cec1a1d3d2547f5e73826a6be611b67d4164a66538d623cc2ec628 WatchSource:0}: Error finding container 50086a5814cec1a1d3d2547f5e73826a6be611b67d4164a66538d623cc2ec628: Status 404 returned error can't find the container with id 50086a5814cec1a1d3d2547f5e73826a6be611b67d4164a66538d623cc2ec628 Nov 25 11:11:39 crc kubenswrapper[4769]: I1125 11:11:39.238890 4769 scope.go:117] "RemoveContainer" containerID="4b939fc00d006c5c582da7cfcaed5ec1363b4453901fa7b7866cf6b4ff5b14f5" Nov 25 11:11:39 crc kubenswrapper[4769]: E1125 11:11:39.239303 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 11:11:39 crc kubenswrapper[4769]: I1125 11:11:39.475568 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-46zxd" event={"ID":"db9dc5e0-3558-4ce3-bc87-611a67d7e949","Type":"ContainerStarted","Data":"50086a5814cec1a1d3d2547f5e73826a6be611b67d4164a66538d623cc2ec628"} Nov 25 11:11:40 crc kubenswrapper[4769]: I1125 11:11:40.162955 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9v54j" podUID="bdd2d26d-cc13-41dd-9e57-fd8e81c627fe" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:40 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:40 crc kubenswrapper[4769]: > Nov 25 11:11:40 crc kubenswrapper[4769]: I1125 11:11:40.488951 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-46zxd" event={"ID":"db9dc5e0-3558-4ce3-bc87-611a67d7e949","Type":"ContainerDied","Data":"b1947a246f56bb26a18ea0b7889b8c1f5fb8e78db2e49e9d4c50931f2221e415"} Nov 25 11:11:40 crc kubenswrapper[4769]: I1125 11:11:40.490153 4769 generic.go:334] "Generic (PLEG): container finished" podID="db9dc5e0-3558-4ce3-bc87-611a67d7e949" containerID="b1947a246f56bb26a18ea0b7889b8c1f5fb8e78db2e49e9d4c50931f2221e415" exitCode=0 Nov 25 11:11:40 crc kubenswrapper[4769]: I1125 11:11:40.940627 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gwvnd" podUID="e588820f-3563-4b98-9ba4-85da1cee2821" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:40 crc kubenswrapper[4769]: timeout: failed to connect service 
":50051" within 1s Nov 25 11:11:40 crc kubenswrapper[4769]: > Nov 25 11:11:41 crc kubenswrapper[4769]: I1125 11:11:41.018068 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gkx9w" podUID="f1b1fb95-30bd-457e-b59c-c06e6b770f42" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:41 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:41 crc kubenswrapper[4769]: > Nov 25 11:11:42 crc kubenswrapper[4769]: I1125 11:11:42.236484 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-856hd" podUID="4a6225f9-cdc5-486a-b813-db81a752fade" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:42 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:42 crc kubenswrapper[4769]: > Nov 25 11:11:42 crc kubenswrapper[4769]: I1125 11:11:42.531387 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-46zxd" event={"ID":"db9dc5e0-3558-4ce3-bc87-611a67d7e949","Type":"ContainerStarted","Data":"88f8034a6179798fc833867588e7a6802c0e2939672e60a5efe1e17af31fa139"} Nov 25 11:11:42 crc kubenswrapper[4769]: I1125 11:11:42.732590 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-h57kt" podUID="af5c9899-c379-4d60-9aae-979a20de2ad2" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:42 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:42 crc kubenswrapper[4769]: > Nov 25 11:11:43 crc kubenswrapper[4769]: I1125 11:11:43.073402 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-ccknj" podUID="36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:43 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:43 crc kubenswrapper[4769]: > Nov 25 11:11:43 crc kubenswrapper[4769]: I1125 11:11:43.496683 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-vr22m" podUID="e9d04b78-f610-43ce-bc66-ca181acf6654" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:43 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:43 crc kubenswrapper[4769]: > Nov 25 11:11:44 crc kubenswrapper[4769]: I1125 11:11:44.214021 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-pq2q8" podUID="71135c8c-e27a-4ae0-9e15-afe6164def89" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:44 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:44 crc kubenswrapper[4769]: > Nov 25 11:11:44 crc kubenswrapper[4769]: I1125 11:11:44.624924 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-h2wlq" podUID="993bc795-fde2-4876-9e76-e97a6095576d" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:44 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:44 crc kubenswrapper[4769]: > Nov 25 11:11:45 crc kubenswrapper[4769]: I1125 11:11:45.202112 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-c6g96" podUID="b831a4b0-2508-4624-b8cb-833a8d5b10ad" containerName="registry-server" 
probeResult="failure" output=< Nov 25 11:11:45 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:45 crc kubenswrapper[4769]: > Nov 25 11:11:45 crc kubenswrapper[4769]: I1125 11:11:45.565470 4769 generic.go:334] "Generic (PLEG): container finished" podID="db9dc5e0-3558-4ce3-bc87-611a67d7e949" containerID="88f8034a6179798fc833867588e7a6802c0e2939672e60a5efe1e17af31fa139" exitCode=0 Nov 25 11:11:45 crc kubenswrapper[4769]: I1125 11:11:45.565925 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-46zxd" event={"ID":"db9dc5e0-3558-4ce3-bc87-611a67d7e949","Type":"ContainerDied","Data":"88f8034a6179798fc833867588e7a6802c0e2939672e60a5efe1e17af31fa139"} Nov 25 11:11:46 crc kubenswrapper[4769]: I1125 11:11:46.233879 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9hv66" podUID="7e334767-99a9-4a84-bcd7-e3e53d56750a" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:46 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:46 crc kubenswrapper[4769]: > Nov 25 11:11:46 crc kubenswrapper[4769]: I1125 11:11:46.578412 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-46zxd" event={"ID":"db9dc5e0-3558-4ce3-bc87-611a67d7e949","Type":"ContainerStarted","Data":"1628ef1944c9a2b1a8dc90af31d22270f12da1c34ac27200a4cd031389cfa727"} Nov 25 11:11:46 crc kubenswrapper[4769]: I1125 11:11:46.598881 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-46zxd" podStartSLOduration=5.064107212 podStartE2EDuration="10.596881485s" podCreationTimestamp="2025-11-25 11:11:36 +0000 UTC" firstStartedPulling="2025-11-25 11:11:40.49103508 +0000 UTC m=+5249.076007393" lastFinishedPulling="2025-11-25 11:11:46.023809353 +0000 UTC m=+5254.608781666" observedRunningTime="2025-11-25 11:11:46.59293956 +0000 UTC m=+5255.177911873" watchObservedRunningTime="2025-11-25 11:11:46.596881485 +0000 UTC m=+5255.181853808" Nov 25 11:11:46 crc kubenswrapper[4769]: I1125 11:11:46.630922 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-46zxd" Nov 25 11:11:46 crc kubenswrapper[4769]: I1125 11:11:46.630992 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-46zxd" Nov 25 11:11:47 crc kubenswrapper[4769]: I1125 11:11:47.683489 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-46zxd" podUID="db9dc5e0-3558-4ce3-bc87-611a67d7e949" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:47 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:47 crc kubenswrapper[4769]: > Nov 25 11:11:50 crc kubenswrapper[4769]: I1125 11:11:50.162601 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9v54j" podUID="bdd2d26d-cc13-41dd-9e57-fd8e81c627fe" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:50 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:50 crc kubenswrapper[4769]: > Nov 25 11:11:50 crc kubenswrapper[4769]: I1125 11:11:50.163197 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-9v54j" Nov 25 11:11:50 crc 
kubenswrapper[4769]: I1125 11:11:50.165177 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="registry-server" containerStatusID={"Type":"cri-o","ID":"0f188e2c4c0b837c01d6ec185b9309ce415641bdf1e752b67ec2071fe9681798"} pod="openshift-marketplace/redhat-operators-9v54j" containerMessage="Container registry-server failed startup probe, will be restarted" Nov 25 11:11:50 crc kubenswrapper[4769]: I1125 11:11:50.165604 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-9v54j" podUID="bdd2d26d-cc13-41dd-9e57-fd8e81c627fe" containerName="registry-server" containerID="cri-o://0f188e2c4c0b837c01d6ec185b9309ce415641bdf1e752b67ec2071fe9681798" gracePeriod=30 Nov 25 11:11:50 crc kubenswrapper[4769]: I1125 11:11:50.948042 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gwvnd" podUID="e588820f-3563-4b98-9ba4-85da1cee2821" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:50 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:50 crc kubenswrapper[4769]: > Nov 25 11:11:51 crc kubenswrapper[4769]: I1125 11:11:51.014521 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gkx9w" podUID="f1b1fb95-30bd-457e-b59c-c06e6b770f42" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:51 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:51 crc kubenswrapper[4769]: > Nov 25 11:11:51 crc kubenswrapper[4769]: I1125 11:11:51.014596 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-gkx9w" Nov 25 11:11:51 crc kubenswrapper[4769]: I1125 11:11:51.015524 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="registry-server" containerStatusID={"Type":"cri-o","ID":"9efaa7da0c863469625d1dad8af55684166514f7474256c07dcaa91b94506e08"} pod="openshift-marketplace/redhat-operators-gkx9w" containerMessage="Container registry-server failed startup probe, will be restarted" Nov 25 11:11:51 crc kubenswrapper[4769]: I1125 11:11:51.015580 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-gkx9w" podUID="f1b1fb95-30bd-457e-b59c-c06e6b770f42" containerName="registry-server" containerID="cri-o://9efaa7da0c863469625d1dad8af55684166514f7474256c07dcaa91b94506e08" gracePeriod=30 Nov 25 11:11:52 crc kubenswrapper[4769]: I1125 11:11:52.239762 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-856hd" podUID="4a6225f9-cdc5-486a-b813-db81a752fade" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:52 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:52 crc kubenswrapper[4769]: > Nov 25 11:11:52 crc kubenswrapper[4769]: I1125 11:11:52.253676 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-856hd" Nov 25 11:11:52 crc kubenswrapper[4769]: I1125 11:11:52.254187 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="registry-server" containerStatusID={"Type":"cri-o","ID":"c11092b5f3f5b634a49406a81945567aba55f7e8eba28dfd0eb51eb84b1e6c5b"} pod="openshift-marketplace/redhat-operators-856hd" containerMessage="Container registry-server failed startup probe, will be restarted" 
Nov 25 11:11:52 crc kubenswrapper[4769]: I1125 11:11:52.254263 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-856hd" podUID="4a6225f9-cdc5-486a-b813-db81a752fade" containerName="registry-server" containerID="cri-o://c11092b5f3f5b634a49406a81945567aba55f7e8eba28dfd0eb51eb84b1e6c5b" gracePeriod=30 Nov 25 11:11:52 crc kubenswrapper[4769]: I1125 11:11:52.738788 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-h57kt" podUID="af5c9899-c379-4d60-9aae-979a20de2ad2" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:52 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:52 crc kubenswrapper[4769]: > Nov 25 11:11:53 crc kubenswrapper[4769]: I1125 11:11:53.083294 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-ccknj" podUID="36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:53 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:53 crc kubenswrapper[4769]: > Nov 25 11:11:53 crc kubenswrapper[4769]: I1125 11:11:53.238274 4769 scope.go:117] "RemoveContainer" containerID="4b939fc00d006c5c582da7cfcaed5ec1363b4453901fa7b7866cf6b4ff5b14f5" Nov 25 11:11:53 crc kubenswrapper[4769]: E1125 11:11:53.238644 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 11:11:53 crc kubenswrapper[4769]: I1125 11:11:53.484541 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-vr22m" podUID="e9d04b78-f610-43ce-bc66-ca181acf6654" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:53 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:53 crc kubenswrapper[4769]: > Nov 25 11:11:53 crc kubenswrapper[4769]: I1125 11:11:53.484641 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-vr22m" Nov 25 11:11:53 crc kubenswrapper[4769]: I1125 11:11:53.485652 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="registry-server" containerStatusID={"Type":"cri-o","ID":"7ee8f571ffb31393afe705799f437bbd6de94b66c4b25b6c4b3deeb4ceeddd41"} pod="openshift-marketplace/redhat-operators-vr22m" containerMessage="Container registry-server failed startup probe, will be restarted" Nov 25 11:11:53 crc kubenswrapper[4769]: I1125 11:11:53.485691 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-vr22m" podUID="e9d04b78-f610-43ce-bc66-ca181acf6654" containerName="registry-server" containerID="cri-o://7ee8f571ffb31393afe705799f437bbd6de94b66c4b25b6c4b3deeb4ceeddd41" gracePeriod=30 Nov 25 11:11:54 crc kubenswrapper[4769]: I1125 11:11:54.205737 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-pq2q8" podUID="71135c8c-e27a-4ae0-9e15-afe6164def89" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:54 crc 
kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:54 crc kubenswrapper[4769]: > Nov 25 11:11:54 crc kubenswrapper[4769]: I1125 11:11:54.638140 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-h2wlq" podUID="993bc795-fde2-4876-9e76-e97a6095576d" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:54 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:54 crc kubenswrapper[4769]: > Nov 25 11:11:54 crc kubenswrapper[4769]: I1125 11:11:54.640141 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-h2wlq" Nov 25 11:11:54 crc kubenswrapper[4769]: I1125 11:11:54.641712 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="registry-server" containerStatusID={"Type":"cri-o","ID":"29fdfb935697aee46562dcc97a060e9c451fb96efb86b1264bca18990f56b411"} pod="openshift-marketplace/redhat-operators-h2wlq" containerMessage="Container registry-server failed startup probe, will be restarted" Nov 25 11:11:54 crc kubenswrapper[4769]: I1125 11:11:54.641757 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-h2wlq" podUID="993bc795-fde2-4876-9e76-e97a6095576d" containerName="registry-server" containerID="cri-o://29fdfb935697aee46562dcc97a060e9c451fb96efb86b1264bca18990f56b411" gracePeriod=30 Nov 25 11:11:55 crc kubenswrapper[4769]: I1125 11:11:55.190519 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-c6g96" podUID="b831a4b0-2508-4624-b8cb-833a8d5b10ad" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:55 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:55 crc kubenswrapper[4769]: > Nov 25 11:11:56 crc kubenswrapper[4769]: I1125 11:11:56.236104 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9hv66" podUID="7e334767-99a9-4a84-bcd7-e3e53d56750a" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:56 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:56 crc kubenswrapper[4769]: > Nov 25 11:11:57 crc kubenswrapper[4769]: I1125 11:11:57.725557 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-46zxd" podUID="db9dc5e0-3558-4ce3-bc87-611a67d7e949" containerName="registry-server" probeResult="failure" output=< Nov 25 11:11:57 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:11:57 crc kubenswrapper[4769]: > Nov 25 11:12:01 crc kubenswrapper[4769]: I1125 11:12:01.854022 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gwvnd" podUID="e588820f-3563-4b98-9ba4-85da1cee2821" containerName="registry-server" probeResult="failure" output=< Nov 25 11:12:01 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:12:01 crc kubenswrapper[4769]: > Nov 25 11:12:02 crc kubenswrapper[4769]: I1125 11:12:02.740651 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-h57kt" podUID="af5c9899-c379-4d60-9aae-979a20de2ad2" containerName="registry-server" probeResult="failure" output=< Nov 25 11:12:02 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:12:02 
Nov 25 11:12:02 crc kubenswrapper[4769]: >
Nov 25 11:12:03 crc kubenswrapper[4769]: I1125 11:12:03.078074 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-ccknj" podUID="36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:12:03 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:12:03 crc kubenswrapper[4769]: >
Nov 25 11:12:04 crc kubenswrapper[4769]: I1125 11:12:04.213401 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-pq2q8" podUID="71135c8c-e27a-4ae0-9e15-afe6164def89" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:12:04 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:12:04 crc kubenswrapper[4769]: >
Nov 25 11:12:05 crc kubenswrapper[4769]: I1125 11:12:05.193088 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-c6g96" podUID="b831a4b0-2508-4624-b8cb-833a8d5b10ad" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:12:05 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:12:05 crc kubenswrapper[4769]: >
Nov 25 11:12:05 crc kubenswrapper[4769]: I1125 11:12:05.237609 4769 scope.go:117] "RemoveContainer" containerID="4b939fc00d006c5c582da7cfcaed5ec1363b4453901fa7b7866cf6b4ff5b14f5"
Nov 25 11:12:05 crc kubenswrapper[4769]: E1125 11:12:05.238007 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256"
Nov 25 11:12:06 crc kubenswrapper[4769]: I1125 11:12:06.229764 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9hv66" podUID="7e334767-99a9-4a84-bcd7-e3e53d56750a" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:12:06 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:12:06 crc kubenswrapper[4769]: >
Nov 25 11:12:06 crc kubenswrapper[4769]: I1125 11:12:06.230167 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-9hv66"
Nov 25 11:12:06 crc kubenswrapper[4769]: I1125 11:12:06.231272 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="registry-server" containerStatusID={"Type":"cri-o","ID":"2a261c0d637945ef58e5165df0fc32835e944bf6bd07452f4b65407653066e62"} pod="openshift-marketplace/redhat-operators-9hv66" containerMessage="Container registry-server failed startup probe, will be restarted"
Nov 25 11:12:06 crc kubenswrapper[4769]: I1125 11:12:06.231335 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-9hv66" podUID="7e334767-99a9-4a84-bcd7-e3e53d56750a" containerName="registry-server" containerID="cri-o://2a261c0d637945ef58e5165df0fc32835e944bf6bd07452f4b65407653066e62" gracePeriod=30
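
The repeating probe failures above are the startup probes of the marketplace registry-server containers; the output "timeout: failed to connect service \":50051\" within 1s" is the kind of message a grpc-health-probe-style check prints when the catalog gRPC port does not answer within its 1-second budget, and a non-zero exit from the probe binary is what the kubelet records as probeResult="failure". A minimal sketch of such a check, with a plain TCP dial standing in for the real gRPC health RPC (an assumption for illustration, not the actual probe binary):

    // probe_sketch.go - minimal sketch of a startup-probe check against :50051.
    // Assumption: a TCP dial stands in for the gRPC health check that a
    // grpc-health-probe-style binary performs; only the 1s budget mirrors the log.
    package main

    import (
    	"fmt"
    	"net"
    	"os"
    	"time"
    )

    func main() {
    	conn, err := net.DialTimeout("tcp", "localhost:50051", 1*time.Second)
    	if err != nil {
    		// Mirrors the failure text seen in the kubelet log.
    		fmt.Printf("timeout: failed to connect service %q within 1s\n", ":50051")
    		os.Exit(1) // non-zero exit marks the probe attempt as failed
    	}
    	conn.Close()
    	fmt.Println("service reachable")
    }
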
containerName="registry-server" probeResult="failure" output=< Nov 25 11:12:07 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:12:07 crc kubenswrapper[4769]: > Nov 25 11:12:10 crc kubenswrapper[4769]: I1125 11:12:10.940905 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gwvnd" podUID="e588820f-3563-4b98-9ba4-85da1cee2821" containerName="registry-server" probeResult="failure" output=< Nov 25 11:12:10 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:12:10 crc kubenswrapper[4769]: > Nov 25 11:12:10 crc kubenswrapper[4769]: I1125 11:12:10.962210 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-gwvnd" Nov 25 11:12:10 crc kubenswrapper[4769]: I1125 11:12:10.963760 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="registry-server" containerStatusID={"Type":"cri-o","ID":"88daf248b5618ddfa14959abc53a3a1d3536978e0cd35c21a95568586424539e"} pod="openshift-marketplace/redhat-operators-gwvnd" containerMessage="Container registry-server failed startup probe, will be restarted" Nov 25 11:12:10 crc kubenswrapper[4769]: I1125 11:12:10.963806 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-gwvnd" podUID="e588820f-3563-4b98-9ba4-85da1cee2821" containerName="registry-server" containerID="cri-o://88daf248b5618ddfa14959abc53a3a1d3536978e0cd35c21a95568586424539e" gracePeriod=30 Nov 25 11:12:12 crc kubenswrapper[4769]: I1125 11:12:12.734889 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-h57kt" podUID="af5c9899-c379-4d60-9aae-979a20de2ad2" containerName="registry-server" probeResult="failure" output=< Nov 25 11:12:12 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:12:12 crc kubenswrapper[4769]: > Nov 25 11:12:12 crc kubenswrapper[4769]: I1125 11:12:12.735439 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-h57kt" Nov 25 11:12:12 crc kubenswrapper[4769]: I1125 11:12:12.736528 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="registry-server" containerStatusID={"Type":"cri-o","ID":"4ba6f479383af1853419c0467f8b024327a29a7aa073259cad5c62f4e347f088"} pod="openshift-marketplace/redhat-operators-h57kt" containerMessage="Container registry-server failed startup probe, will be restarted" Nov 25 11:12:12 crc kubenswrapper[4769]: I1125 11:12:12.736574 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-h57kt" podUID="af5c9899-c379-4d60-9aae-979a20de2ad2" containerName="registry-server" containerID="cri-o://4ba6f479383af1853419c0467f8b024327a29a7aa073259cad5c62f4e347f088" gracePeriod=30 Nov 25 11:12:13 crc kubenswrapper[4769]: I1125 11:12:13.073873 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-ccknj" podUID="36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24" containerName="registry-server" probeResult="failure" output=< Nov 25 11:12:13 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:12:13 crc kubenswrapper[4769]: > Nov 25 11:12:13 crc kubenswrapper[4769]: I1125 11:12:13.073958 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/redhat-operators-ccknj" Nov 25 11:12:13 crc kubenswrapper[4769]: I1125 11:12:13.074551 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="registry-server" containerStatusID={"Type":"cri-o","ID":"06b1191036f240719dbe9a83b3904364f6e7209c7ca666c6105627d6be466272"} pod="openshift-marketplace/redhat-operators-ccknj" containerMessage="Container registry-server failed startup probe, will be restarted" Nov 25 11:12:13 crc kubenswrapper[4769]: I1125 11:12:13.074587 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-ccknj" podUID="36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24" containerName="registry-server" containerID="cri-o://06b1191036f240719dbe9a83b3904364f6e7209c7ca666c6105627d6be466272" gracePeriod=30 Nov 25 11:12:14 crc kubenswrapper[4769]: I1125 11:12:14.203113 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-pq2q8" podUID="71135c8c-e27a-4ae0-9e15-afe6164def89" containerName="registry-server" probeResult="failure" output=< Nov 25 11:12:14 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:12:14 crc kubenswrapper[4769]: > Nov 25 11:12:14 crc kubenswrapper[4769]: I1125 11:12:14.203604 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-pq2q8" Nov 25 11:12:14 crc kubenswrapper[4769]: I1125 11:12:14.205145 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="registry-server" containerStatusID={"Type":"cri-o","ID":"169354e3899914d79722f328dea57dfd7c4398163494f6544ce032cba9e0b620"} pod="openshift-marketplace/redhat-operators-pq2q8" containerMessage="Container registry-server failed startup probe, will be restarted" Nov 25 11:12:14 crc kubenswrapper[4769]: I1125 11:12:14.205197 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-pq2q8" podUID="71135c8c-e27a-4ae0-9e15-afe6164def89" containerName="registry-server" containerID="cri-o://169354e3899914d79722f328dea57dfd7c4398163494f6544ce032cba9e0b620" gracePeriod=30 Nov 25 11:12:15 crc kubenswrapper[4769]: I1125 11:12:15.223497 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-c6g96" podUID="b831a4b0-2508-4624-b8cb-833a8d5b10ad" containerName="registry-server" probeResult="failure" output=< Nov 25 11:12:15 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:12:15 crc kubenswrapper[4769]: > Nov 25 11:12:15 crc kubenswrapper[4769]: I1125 11:12:15.224673 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-c6g96" Nov 25 11:12:15 crc kubenswrapper[4769]: I1125 11:12:15.226024 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="registry-server" containerStatusID={"Type":"cri-o","ID":"eda0ea0f825c571e9753ca1ba01c953cacbc67c2654df4dde4d29e6472a0522c"} pod="openshift-marketplace/redhat-operators-c6g96" containerMessage="Container registry-server failed startup probe, will be restarted" Nov 25 11:12:15 crc kubenswrapper[4769]: I1125 11:12:15.226167 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-c6g96" podUID="b831a4b0-2508-4624-b8cb-833a8d5b10ad" containerName="registry-server" 
containerID="cri-o://eda0ea0f825c571e9753ca1ba01c953cacbc67c2654df4dde4d29e6472a0522c" gracePeriod=30 Nov 25 11:12:16 crc kubenswrapper[4769]: I1125 11:12:16.719140 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-46zxd" Nov 25 11:12:16 crc kubenswrapper[4769]: I1125 11:12:16.779013 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-46zxd" Nov 25 11:12:16 crc kubenswrapper[4769]: I1125 11:12:16.971481 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-46zxd"] Nov 25 11:12:17 crc kubenswrapper[4769]: I1125 11:12:17.965995 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-46zxd" podUID="db9dc5e0-3558-4ce3-bc87-611a67d7e949" containerName="registry-server" containerID="cri-o://1628ef1944c9a2b1a8dc90af31d22270f12da1c34ac27200a4cd031389cfa727" gracePeriod=2 Nov 25 11:12:18 crc kubenswrapper[4769]: I1125 11:12:18.982688 4769 generic.go:334] "Generic (PLEG): container finished" podID="db9dc5e0-3558-4ce3-bc87-611a67d7e949" containerID="1628ef1944c9a2b1a8dc90af31d22270f12da1c34ac27200a4cd031389cfa727" exitCode=0 Nov 25 11:12:18 crc kubenswrapper[4769]: I1125 11:12:18.982771 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-46zxd" event={"ID":"db9dc5e0-3558-4ce3-bc87-611a67d7e949","Type":"ContainerDied","Data":"1628ef1944c9a2b1a8dc90af31d22270f12da1c34ac27200a4cd031389cfa727"} Nov 25 11:12:19 crc kubenswrapper[4769]: I1125 11:12:19.594610 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-46zxd" Nov 25 11:12:19 crc kubenswrapper[4769]: I1125 11:12:19.746517 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x5q7m\" (UniqueName: \"kubernetes.io/projected/db9dc5e0-3558-4ce3-bc87-611a67d7e949-kube-api-access-x5q7m\") pod \"db9dc5e0-3558-4ce3-bc87-611a67d7e949\" (UID: \"db9dc5e0-3558-4ce3-bc87-611a67d7e949\") " Nov 25 11:12:19 crc kubenswrapper[4769]: I1125 11:12:19.746781 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/db9dc5e0-3558-4ce3-bc87-611a67d7e949-catalog-content\") pod \"db9dc5e0-3558-4ce3-bc87-611a67d7e949\" (UID: \"db9dc5e0-3558-4ce3-bc87-611a67d7e949\") " Nov 25 11:12:19 crc kubenswrapper[4769]: I1125 11:12:19.746925 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/db9dc5e0-3558-4ce3-bc87-611a67d7e949-utilities\") pod \"db9dc5e0-3558-4ce3-bc87-611a67d7e949\" (UID: \"db9dc5e0-3558-4ce3-bc87-611a67d7e949\") " Nov 25 11:12:19 crc kubenswrapper[4769]: I1125 11:12:19.749488 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/db9dc5e0-3558-4ce3-bc87-611a67d7e949-utilities" (OuterVolumeSpecName: "utilities") pod "db9dc5e0-3558-4ce3-bc87-611a67d7e949" (UID: "db9dc5e0-3558-4ce3-bc87-611a67d7e949"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:12:19 crc kubenswrapper[4769]: I1125 11:12:19.852033 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/db9dc5e0-3558-4ce3-bc87-611a67d7e949-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 11:12:19 crc kubenswrapper[4769]: I1125 11:12:19.890440 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/db9dc5e0-3558-4ce3-bc87-611a67d7e949-kube-api-access-x5q7m" (OuterVolumeSpecName: "kube-api-access-x5q7m") pod "db9dc5e0-3558-4ce3-bc87-611a67d7e949" (UID: "db9dc5e0-3558-4ce3-bc87-611a67d7e949"). InnerVolumeSpecName "kube-api-access-x5q7m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:12:19 crc kubenswrapper[4769]: I1125 11:12:19.919457 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/db9dc5e0-3558-4ce3-bc87-611a67d7e949-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "db9dc5e0-3558-4ce3-bc87-611a67d7e949" (UID: "db9dc5e0-3558-4ce3-bc87-611a67d7e949"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:12:19 crc kubenswrapper[4769]: I1125 11:12:19.955459 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x5q7m\" (UniqueName: \"kubernetes.io/projected/db9dc5e0-3558-4ce3-bc87-611a67d7e949-kube-api-access-x5q7m\") on node \"crc\" DevicePath \"\"" Nov 25 11:12:19 crc kubenswrapper[4769]: I1125 11:12:19.955507 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/db9dc5e0-3558-4ce3-bc87-611a67d7e949-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 11:12:20 crc kubenswrapper[4769]: I1125 11:12:20.030655 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-46zxd" event={"ID":"db9dc5e0-3558-4ce3-bc87-611a67d7e949","Type":"ContainerDied","Data":"50086a5814cec1a1d3d2547f5e73826a6be611b67d4164a66538d623cc2ec628"} Nov 25 11:12:20 crc kubenswrapper[4769]: I1125 11:12:20.030709 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-46zxd" Nov 25 11:12:20 crc kubenswrapper[4769]: I1125 11:12:20.030714 4769 scope.go:117] "RemoveContainer" containerID="1628ef1944c9a2b1a8dc90af31d22270f12da1c34ac27200a4cd031389cfa727" Nov 25 11:12:20 crc kubenswrapper[4769]: I1125 11:12:20.148907 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-46zxd"] Nov 25 11:12:20 crc kubenswrapper[4769]: I1125 11:12:20.165263 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-46zxd"] Nov 25 11:12:20 crc kubenswrapper[4769]: I1125 11:12:20.167582 4769 scope.go:117] "RemoveContainer" containerID="88f8034a6179798fc833867588e7a6802c0e2939672e60a5efe1e17af31fa139" Nov 25 11:12:20 crc kubenswrapper[4769]: I1125 11:12:20.218922 4769 scope.go:117] "RemoveContainer" containerID="b1947a246f56bb26a18ea0b7889b8c1f5fb8e78db2e49e9d4c50931f2221e415" Nov 25 11:12:20 crc kubenswrapper[4769]: I1125 11:12:20.237332 4769 scope.go:117] "RemoveContainer" containerID="4b939fc00d006c5c582da7cfcaed5ec1363b4453901fa7b7866cf6b4ff5b14f5" Nov 25 11:12:20 crc kubenswrapper[4769]: E1125 11:12:20.237626 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 11:12:20 crc kubenswrapper[4769]: I1125 11:12:20.252852 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="db9dc5e0-3558-4ce3-bc87-611a67d7e949" path="/var/lib/kubelet/pods/db9dc5e0-3558-4ce3-bc87-611a67d7e949/volumes" Nov 25 11:12:21 crc kubenswrapper[4769]: I1125 11:12:21.093774 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-9v54j_bdd2d26d-cc13-41dd-9e57-fd8e81c627fe/registry-server/0.log" Nov 25 11:12:21 crc kubenswrapper[4769]: I1125 11:12:21.096191 4769 generic.go:334] "Generic (PLEG): container finished" podID="bdd2d26d-cc13-41dd-9e57-fd8e81c627fe" containerID="0f188e2c4c0b837c01d6ec185b9309ce415641bdf1e752b67ec2071fe9681798" exitCode=137 Nov 25 11:12:21 crc kubenswrapper[4769]: I1125 11:12:21.096260 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9v54j" event={"ID":"bdd2d26d-cc13-41dd-9e57-fd8e81c627fe","Type":"ContainerDied","Data":"0f188e2c4c0b837c01d6ec185b9309ce415641bdf1e752b67ec2071fe9681798"} Nov 25 11:12:22 crc kubenswrapper[4769]: I1125 11:12:22.108446 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-gkx9w_f1b1fb95-30bd-457e-b59c-c06e6b770f42/registry-server/0.log" Nov 25 11:12:22 crc kubenswrapper[4769]: I1125 11:12:22.109562 4769 generic.go:334] "Generic (PLEG): container finished" podID="f1b1fb95-30bd-457e-b59c-c06e6b770f42" containerID="9efaa7da0c863469625d1dad8af55684166514f7474256c07dcaa91b94506e08" exitCode=137 Nov 25 11:12:22 crc kubenswrapper[4769]: I1125 11:12:22.109660 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gkx9w" event={"ID":"f1b1fb95-30bd-457e-b59c-c06e6b770f42","Type":"ContainerDied","Data":"9efaa7da0c863469625d1dad8af55684166514f7474256c07dcaa91b94506e08"} Nov 25 11:12:22 crc kubenswrapper[4769]: I1125 
Nov 25 11:12:22 crc kubenswrapper[4769]: I1125 11:12:22.111951 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-9v54j_bdd2d26d-cc13-41dd-9e57-fd8e81c627fe/registry-server/0.log"
Nov 25 11:12:22 crc kubenswrapper[4769]: I1125 11:12:22.112816 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9v54j" event={"ID":"bdd2d26d-cc13-41dd-9e57-fd8e81c627fe","Type":"ContainerStarted","Data":"b836d03926facb035157fc561fa40e4d5b0eeaa459d2dc5ec4aa8abf8a9cb149"}
Nov 25 11:12:23 crc kubenswrapper[4769]: I1125 11:12:23.126024 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-856hd_4a6225f9-cdc5-486a-b813-db81a752fade/registry-server/0.log"
Nov 25 11:12:23 crc kubenswrapper[4769]: I1125 11:12:23.127476 4769 generic.go:334] "Generic (PLEG): container finished" podID="4a6225f9-cdc5-486a-b813-db81a752fade" containerID="c11092b5f3f5b634a49406a81945567aba55f7e8eba28dfd0eb51eb84b1e6c5b" exitCode=137
Nov 25 11:12:23 crc kubenswrapper[4769]: I1125 11:12:23.127551 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-856hd" event={"ID":"4a6225f9-cdc5-486a-b813-db81a752fade","Type":"ContainerDied","Data":"c11092b5f3f5b634a49406a81945567aba55f7e8eba28dfd0eb51eb84b1e6c5b"}
Nov 25 11:12:23 crc kubenswrapper[4769]: I1125 11:12:23.130412 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-gkx9w_f1b1fb95-30bd-457e-b59c-c06e6b770f42/registry-server/0.log"
Nov 25 11:12:23 crc kubenswrapper[4769]: I1125 11:12:23.131361 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gkx9w" event={"ID":"f1b1fb95-30bd-457e-b59c-c06e6b770f42","Type":"ContainerStarted","Data":"97d3e2f5500f546826a966cbcf97eda09bab5c7837cf4b791ecb7db4bc235c6a"}
Nov 25 11:12:24 crc kubenswrapper[4769]: I1125 11:12:24.144547 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-856hd_4a6225f9-cdc5-486a-b813-db81a752fade/registry-server/0.log"
Nov 25 11:12:24 crc kubenswrapper[4769]: I1125 11:12:24.146023 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-856hd" event={"ID":"4a6225f9-cdc5-486a-b813-db81a752fade","Type":"ContainerStarted","Data":"7bbb8e9e5221e6b1e6bd2d68d5535139e6efe50a4d3ba16c44a019da0fb5c1db"}
Nov 25 11:12:24 crc kubenswrapper[4769]: I1125 11:12:24.152687 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vr22m_e9d04b78-f610-43ce-bc66-ca181acf6654/registry-server/0.log"
Nov 25 11:12:24 crc kubenswrapper[4769]: I1125 11:12:24.153649 4769 generic.go:334] "Generic (PLEG): container finished" podID="e9d04b78-f610-43ce-bc66-ca181acf6654" containerID="7ee8f571ffb31393afe705799f437bbd6de94b66c4b25b6c4b3deeb4ceeddd41" exitCode=137
Nov 25 11:12:24 crc kubenswrapper[4769]: I1125 11:12:24.153720 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vr22m" event={"ID":"e9d04b78-f610-43ce-bc66-ca181acf6654","Type":"ContainerDied","Data":"7ee8f571ffb31393afe705799f437bbd6de94b66c4b25b6c4b3deeb4ceeddd41"}
Nov 25 11:12:25 crc kubenswrapper[4769]: I1125 11:12:25.169366 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-h2wlq_993bc795-fde2-4876-9e76-e97a6095576d/registry-server/0.log"
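
The "Generic (PLEG): container finished" / "SyncLoop (PLEG): event for pod" pairs above come from the kubelet's pod lifecycle event generator, which periodically relists container state from the runtime and converts transitions into ContainerDied / ContainerStarted events for the sync loop. A conceptual model of that diffing step (all types and names here are illustrative, not kubelet's; the truncated container IDs echo entries from this log):

    // pleg_sketch.go - conceptual model of a relist-based lifecycle event
    // generator turning container state transitions into events.
    package main

    import "fmt"

    type state string

    const (
    	running state = "running"
    	exited  state = "exited"
    )

    // diff compares two relist snapshots and emits an event per transition.
    func diff(before, after map[string]state) []string {
    	var events []string
    	for id, s := range after {
    		prev := before[id]
    		if prev == running && s == exited {
    			events = append(events, "ContainerDied "+id)
    		}
    		if prev != running && s == running {
    			events = append(events, "ContainerStarted "+id)
    		}
    	}
    	return events
    }

    func main() {
    	before := map[string]state{"c11092b5": running}
    	after := map[string]state{"c11092b5": exited, "7bbb8e9e": running}
    	for _, e := range diff(before, after) {
    		fmt.Println(e)
    	}
    }
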
(PLEG): container finished" podID="993bc795-fde2-4876-9e76-e97a6095576d" containerID="29fdfb935697aee46562dcc97a060e9c451fb96efb86b1264bca18990f56b411" exitCode=137 Nov 25 11:12:25 crc kubenswrapper[4769]: I1125 11:12:25.171297 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h2wlq" event={"ID":"993bc795-fde2-4876-9e76-e97a6095576d","Type":"ContainerDied","Data":"29fdfb935697aee46562dcc97a060e9c451fb96efb86b1264bca18990f56b411"} Nov 25 11:12:26 crc kubenswrapper[4769]: I1125 11:12:26.185642 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vr22m_e9d04b78-f610-43ce-bc66-ca181acf6654/registry-server/0.log" Nov 25 11:12:26 crc kubenswrapper[4769]: I1125 11:12:26.187038 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vr22m" event={"ID":"e9d04b78-f610-43ce-bc66-ca181acf6654","Type":"ContainerStarted","Data":"831425ff1c6b6beac4d23fee8cd8d66b7eb231b9f1666130855ae0c44e49fea5"} Nov 25 11:12:27 crc kubenswrapper[4769]: I1125 11:12:27.235367 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-h2wlq_993bc795-fde2-4876-9e76-e97a6095576d/registry-server/0.log" Nov 25 11:12:27 crc kubenswrapper[4769]: I1125 11:12:27.236812 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h2wlq" event={"ID":"993bc795-fde2-4876-9e76-e97a6095576d","Type":"ContainerStarted","Data":"c321d22a1ae6fa2b8d2be1a746c67da60ad4d81030707dc7e25df63a208f8830"} Nov 25 11:12:29 crc kubenswrapper[4769]: I1125 11:12:29.102643 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-9v54j" Nov 25 11:12:29 crc kubenswrapper[4769]: I1125 11:12:29.103016 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-9v54j" Nov 25 11:12:29 crc kubenswrapper[4769]: I1125 11:12:29.963638 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-gkx9w" Nov 25 11:12:29 crc kubenswrapper[4769]: I1125 11:12:29.965301 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-gkx9w" Nov 25 11:12:30 crc kubenswrapper[4769]: I1125 11:12:30.170330 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9v54j" podUID="bdd2d26d-cc13-41dd-9e57-fd8e81c627fe" containerName="registry-server" probeResult="failure" output=< Nov 25 11:12:30 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:12:30 crc kubenswrapper[4769]: > Nov 25 11:12:31 crc kubenswrapper[4769]: I1125 11:12:31.013628 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gkx9w" podUID="f1b1fb95-30bd-457e-b59c-c06e6b770f42" containerName="registry-server" probeResult="failure" output=< Nov 25 11:12:31 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:12:31 crc kubenswrapper[4769]: > Nov 25 11:12:31 crc kubenswrapper[4769]: I1125 11:12:31.183730 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-856hd" Nov 25 11:12:31 crc kubenswrapper[4769]: I1125 11:12:31.184446 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-856hd" Nov 25 
Nov 25 11:12:32 crc kubenswrapper[4769]: I1125 11:12:32.238773 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-856hd" podUID="4a6225f9-cdc5-486a-b813-db81a752fade" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:12:32 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:12:32 crc kubenswrapper[4769]: >
Nov 25 11:12:32 crc kubenswrapper[4769]: I1125 11:12:32.248540 4769 scope.go:117] "RemoveContainer" containerID="4b939fc00d006c5c582da7cfcaed5ec1363b4453901fa7b7866cf6b4ff5b14f5"
Nov 25 11:12:32 crc kubenswrapper[4769]: E1125 11:12:32.248914 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256"
Nov 25 11:12:32 crc kubenswrapper[4769]: I1125 11:12:32.435109 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-vr22m"
Nov 25 11:12:32 crc kubenswrapper[4769]: I1125 11:12:32.435161 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-vr22m"
Nov 25 11:12:33 crc kubenswrapper[4769]: I1125 11:12:33.512733 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-vr22m" podUID="e9d04b78-f610-43ce-bc66-ca181acf6654" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:12:33 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:12:33 crc kubenswrapper[4769]: >
Nov 25 11:12:33 crc kubenswrapper[4769]: I1125 11:12:33.574853 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-h2wlq"
Nov 25 11:12:33 crc kubenswrapper[4769]: I1125 11:12:33.574899 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-h2wlq"
Nov 25 11:12:34 crc kubenswrapper[4769]: I1125 11:12:34.631670 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-h2wlq" podUID="993bc795-fde2-4876-9e76-e97a6095576d" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:12:34 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:12:34 crc kubenswrapper[4769]: >
Nov 25 11:12:36 crc kubenswrapper[4769]: I1125 11:12:36.380013 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-9hv66_7e334767-99a9-4a84-bcd7-e3e53d56750a/registry-server/0.log"
Nov 25 11:12:36 crc kubenswrapper[4769]: I1125 11:12:36.381818 4769 generic.go:334] "Generic (PLEG): container finished" podID="7e334767-99a9-4a84-bcd7-e3e53d56750a" containerID="2a261c0d637945ef58e5165df0fc32835e944bf6bd07452f4b65407653066e62" exitCode=137
Nov 25 11:12:36 crc kubenswrapper[4769]: I1125 11:12:36.381898 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9hv66" event={"ID":"7e334767-99a9-4a84-bcd7-e3e53d56750a","Type":"ContainerDied","Data":"2a261c0d637945ef58e5165df0fc32835e944bf6bd07452f4b65407653066e62"}
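
The recurring machine-config-daemon errors show the kubelet's restart backoff at its ceiling: "back-off 5m0s restarting failed container" repeats on every pod sync until the backoff window expires and another restart is attempted. The delay grows exponentially per failed restart and is capped; a sketch of capped exponential backoff, where the 10s base is an assumed illustration value and the 5m cap is the figure visible in the log:

    // backoff_sketch.go - capped exponential backoff of the kind behind
    // "back-off 5m0s restarting failed container". The 10s base is assumed
    // for illustration; the 5m cap is the value visible in the log.
    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	base, maxDelay := 10*time.Second, 5*time.Minute
    	delay := base
    	for attempt := 1; attempt <= 8; attempt++ {
    		fmt.Printf("attempt %d: wait %v\n", attempt, delay)
    		delay *= 2
    		if delay > maxDelay {
    			delay = maxDelay // further failures all wait the capped 5m0s
    		}
    	}
    }
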
"Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-9hv66_7e334767-99a9-4a84-bcd7-e3e53d56750a/registry-server/0.log" Nov 25 11:12:39 crc kubenswrapper[4769]: I1125 11:12:39.428759 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9hv66" event={"ID":"7e334767-99a9-4a84-bcd7-e3e53d56750a","Type":"ContainerStarted","Data":"fc071f5e0723a066e50b815205af45d4795644cf612a0459af9b2cc2a7ceec74"} Nov 25 11:12:40 crc kubenswrapper[4769]: I1125 11:12:40.157274 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9v54j" podUID="bdd2d26d-cc13-41dd-9e57-fd8e81c627fe" containerName="registry-server" probeResult="failure" output=< Nov 25 11:12:40 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:12:40 crc kubenswrapper[4769]: > Nov 25 11:12:41 crc kubenswrapper[4769]: I1125 11:12:41.017804 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gkx9w" podUID="f1b1fb95-30bd-457e-b59c-c06e6b770f42" containerName="registry-server" probeResult="failure" output=< Nov 25 11:12:41 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:12:41 crc kubenswrapper[4769]: > Nov 25 11:12:41 crc kubenswrapper[4769]: I1125 11:12:41.452696 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-gwvnd_e588820f-3563-4b98-9ba4-85da1cee2821/registry-server/0.log" Nov 25 11:12:41 crc kubenswrapper[4769]: I1125 11:12:41.453579 4769 generic.go:334] "Generic (PLEG): container finished" podID="e588820f-3563-4b98-9ba4-85da1cee2821" containerID="88daf248b5618ddfa14959abc53a3a1d3536978e0cd35c21a95568586424539e" exitCode=137 Nov 25 11:12:41 crc kubenswrapper[4769]: I1125 11:12:41.453630 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gwvnd" event={"ID":"e588820f-3563-4b98-9ba4-85da1cee2821","Type":"ContainerDied","Data":"88daf248b5618ddfa14959abc53a3a1d3536978e0cd35c21a95568586424539e"} Nov 25 11:12:42 crc kubenswrapper[4769]: I1125 11:12:42.235901 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-856hd" podUID="4a6225f9-cdc5-486a-b813-db81a752fade" containerName="registry-server" probeResult="failure" output=< Nov 25 11:12:42 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:12:42 crc kubenswrapper[4769]: > Nov 25 11:12:43 crc kubenswrapper[4769]: I1125 11:12:43.481135 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-gwvnd_e588820f-3563-4b98-9ba4-85da1cee2821/registry-server/0.log" Nov 25 11:12:43 crc kubenswrapper[4769]: I1125 11:12:43.486984 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-h57kt_af5c9899-c379-4d60-9aae-979a20de2ad2/registry-server/0.log" Nov 25 11:12:43 crc kubenswrapper[4769]: I1125 11:12:43.487832 4769 generic.go:334] "Generic (PLEG): container finished" podID="af5c9899-c379-4d60-9aae-979a20de2ad2" containerID="4ba6f479383af1853419c0467f8b024327a29a7aa073259cad5c62f4e347f088" exitCode=137 Nov 25 11:12:43 crc kubenswrapper[4769]: I1125 11:12:43.487895 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h57kt" 
event={"ID":"af5c9899-c379-4d60-9aae-979a20de2ad2","Type":"ContainerDied","Data":"4ba6f479383af1853419c0467f8b024327a29a7aa073259cad5c62f4e347f088"} Nov 25 11:12:43 crc kubenswrapper[4769]: I1125 11:12:43.489641 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-vr22m" podUID="e9d04b78-f610-43ce-bc66-ca181acf6654" containerName="registry-server" probeResult="failure" output=< Nov 25 11:12:43 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:12:43 crc kubenswrapper[4769]: > Nov 25 11:12:43 crc kubenswrapper[4769]: I1125 11:12:43.491189 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-ccknj_36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24/registry-server/0.log" Nov 25 11:12:43 crc kubenswrapper[4769]: I1125 11:12:43.492406 4769 generic.go:334] "Generic (PLEG): container finished" podID="36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24" containerID="06b1191036f240719dbe9a83b3904364f6e7209c7ca666c6105627d6be466272" exitCode=137 Nov 25 11:12:43 crc kubenswrapper[4769]: I1125 11:12:43.492449 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ccknj" event={"ID":"36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24","Type":"ContainerDied","Data":"06b1191036f240719dbe9a83b3904364f6e7209c7ca666c6105627d6be466272"} Nov 25 11:12:44 crc kubenswrapper[4769]: I1125 11:12:44.532661 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-h57kt_af5c9899-c379-4d60-9aae-979a20de2ad2/registry-server/0.log" Nov 25 11:12:44 crc kubenswrapper[4769]: I1125 11:12:44.536635 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h57kt" event={"ID":"af5c9899-c379-4d60-9aae-979a20de2ad2","Type":"ContainerStarted","Data":"86b51cfdab8c3140e15a460449dbe43cf35a1d4a2038624f852243daba5403fc"} Nov 25 11:12:44 crc kubenswrapper[4769]: I1125 11:12:44.540394 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-pq2q8_71135c8c-e27a-4ae0-9e15-afe6164def89/registry-server/0.log" Nov 25 11:12:44 crc kubenswrapper[4769]: I1125 11:12:44.542793 4769 generic.go:334] "Generic (PLEG): container finished" podID="71135c8c-e27a-4ae0-9e15-afe6164def89" containerID="169354e3899914d79722f328dea57dfd7c4398163494f6544ce032cba9e0b620" exitCode=137 Nov 25 11:12:44 crc kubenswrapper[4769]: I1125 11:12:44.542892 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pq2q8" event={"ID":"71135c8c-e27a-4ae0-9e15-afe6164def89","Type":"ContainerDied","Data":"169354e3899914d79722f328dea57dfd7c4398163494f6544ce032cba9e0b620"} Nov 25 11:12:44 crc kubenswrapper[4769]: I1125 11:12:44.546072 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-gwvnd_e588820f-3563-4b98-9ba4-85da1cee2821/registry-server/0.log" Nov 25 11:12:44 crc kubenswrapper[4769]: I1125 11:12:44.546914 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gwvnd" event={"ID":"e588820f-3563-4b98-9ba4-85da1cee2821","Type":"ContainerStarted","Data":"a1568be57c7beab61bb0a6b097832669595590c8290f7842f32a3054807d0107"} Nov 25 11:12:44 crc kubenswrapper[4769]: I1125 11:12:44.631096 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-h2wlq" podUID="993bc795-fde2-4876-9e76-e97a6095576d" containerName="registry-server" 
probeResult="failure" output=< Nov 25 11:12:44 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:12:44 crc kubenswrapper[4769]: > Nov 25 11:12:45 crc kubenswrapper[4769]: I1125 11:12:45.175339 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-9hv66" Nov 25 11:12:45 crc kubenswrapper[4769]: I1125 11:12:45.175403 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-9hv66" Nov 25 11:12:45 crc kubenswrapper[4769]: I1125 11:12:45.561189 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-c6g96_b831a4b0-2508-4624-b8cb-833a8d5b10ad/registry-server/0.log" Nov 25 11:12:45 crc kubenswrapper[4769]: I1125 11:12:45.564432 4769 generic.go:334] "Generic (PLEG): container finished" podID="b831a4b0-2508-4624-b8cb-833a8d5b10ad" containerID="eda0ea0f825c571e9753ca1ba01c953cacbc67c2654df4dde4d29e6472a0522c" exitCode=137 Nov 25 11:12:45 crc kubenswrapper[4769]: I1125 11:12:45.564539 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c6g96" event={"ID":"b831a4b0-2508-4624-b8cb-833a8d5b10ad","Type":"ContainerDied","Data":"eda0ea0f825c571e9753ca1ba01c953cacbc67c2654df4dde4d29e6472a0522c"} Nov 25 11:12:45 crc kubenswrapper[4769]: I1125 11:12:45.567181 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-ccknj_36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24/registry-server/0.log" Nov 25 11:12:45 crc kubenswrapper[4769]: I1125 11:12:45.567960 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ccknj" event={"ID":"36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24","Type":"ContainerStarted","Data":"2ce9161590efb0cbf9f27ae0ba8ac861110a0de8d2a551e1df1028d917874094"} Nov 25 11:12:46 crc kubenswrapper[4769]: I1125 11:12:46.228713 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9hv66" podUID="7e334767-99a9-4a84-bcd7-e3e53d56750a" containerName="registry-server" probeResult="failure" output=< Nov 25 11:12:46 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:12:46 crc kubenswrapper[4769]: > Nov 25 11:12:46 crc kubenswrapper[4769]: I1125 11:12:46.237252 4769 scope.go:117] "RemoveContainer" containerID="4b939fc00d006c5c582da7cfcaed5ec1363b4453901fa7b7866cf6b4ff5b14f5" Nov 25 11:12:46 crc kubenswrapper[4769]: E1125 11:12:46.237691 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 11:12:46 crc kubenswrapper[4769]: I1125 11:12:46.587950 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-pq2q8_71135c8c-e27a-4ae0-9e15-afe6164def89/registry-server/0.log" Nov 25 11:12:46 crc kubenswrapper[4769]: I1125 11:12:46.589071 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pq2q8" event={"ID":"71135c8c-e27a-4ae0-9e15-afe6164def89","Type":"ContainerStarted","Data":"5ee4eb2cabb4bfb20c07a1ac10dc8a00f3e727297791d718cb56b3962af6fdd1"} Nov 25 
Nov 25 11:12:47 crc kubenswrapper[4769]: I1125 11:12:47.603383 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-c6g96_b831a4b0-2508-4624-b8cb-833a8d5b10ad/registry-server/0.log"
Nov 25 11:12:47 crc kubenswrapper[4769]: I1125 11:12:47.604838 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c6g96" event={"ID":"b831a4b0-2508-4624-b8cb-833a8d5b10ad","Type":"ContainerStarted","Data":"9ea9ed614dd61aaaf48b170269a23e7f0b42ad623e6fd428f95b28435e92234d"}
Nov 25 11:12:49 crc kubenswrapper[4769]: I1125 11:12:49.884182 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-gwvnd"
Nov 25 11:12:49 crc kubenswrapper[4769]: I1125 11:12:49.885603 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-gwvnd"
Nov 25 11:12:50 crc kubenswrapper[4769]: I1125 11:12:50.166016 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9v54j" podUID="bdd2d26d-cc13-41dd-9e57-fd8e81c627fe" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:12:50 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:12:50 crc kubenswrapper[4769]: >
Nov 25 11:12:50 crc kubenswrapper[4769]: I1125 11:12:50.943660 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gwvnd" podUID="e588820f-3563-4b98-9ba4-85da1cee2821" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:12:50 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:12:50 crc kubenswrapper[4769]: >
Nov 25 11:12:51 crc kubenswrapper[4769]: I1125 11:12:51.020264 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gkx9w" podUID="f1b1fb95-30bd-457e-b59c-c06e6b770f42" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:12:51 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:12:51 crc kubenswrapper[4769]: >
Nov 25 11:12:51 crc kubenswrapper[4769]: I1125 11:12:51.683988 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-h57kt"
Nov 25 11:12:51 crc kubenswrapper[4769]: I1125 11:12:51.686128 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-h57kt"
Nov 25 11:12:52 crc kubenswrapper[4769]: I1125 11:12:52.019406 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-ccknj"
Nov 25 11:12:52 crc kubenswrapper[4769]: I1125 11:12:52.019460 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-ccknj"
Nov 25 11:12:52 crc kubenswrapper[4769]: I1125 11:12:52.421030 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-856hd" podUID="4a6225f9-cdc5-486a-b813-db81a752fade" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:12:52 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:12:52 crc kubenswrapper[4769]: >
containerName="registry-server" probeResult="failure" output=< Nov 25 11:12:52 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:12:52 crc kubenswrapper[4769]: > Nov 25 11:12:53 crc kubenswrapper[4769]: I1125 11:12:53.073438 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-ccknj" podUID="36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24" containerName="registry-server" probeResult="failure" output=< Nov 25 11:12:53 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:12:53 crc kubenswrapper[4769]: > Nov 25 11:12:53 crc kubenswrapper[4769]: I1125 11:12:53.155996 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-pq2q8" Nov 25 11:12:53 crc kubenswrapper[4769]: I1125 11:12:53.156053 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-pq2q8" Nov 25 11:12:53 crc kubenswrapper[4769]: I1125 11:12:53.482799 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-vr22m" podUID="e9d04b78-f610-43ce-bc66-ca181acf6654" containerName="registry-server" probeResult="failure" output=< Nov 25 11:12:53 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:12:53 crc kubenswrapper[4769]: > Nov 25 11:12:54 crc kubenswrapper[4769]: I1125 11:12:54.133212 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-c6g96" Nov 25 11:12:54 crc kubenswrapper[4769]: I1125 11:12:54.133258 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-c6g96" Nov 25 11:12:54 crc kubenswrapper[4769]: I1125 11:12:54.764458 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-pq2q8" podUID="71135c8c-e27a-4ae0-9e15-afe6164def89" containerName="registry-server" probeResult="failure" output=< Nov 25 11:12:54 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:12:54 crc kubenswrapper[4769]: > Nov 25 11:12:54 crc kubenswrapper[4769]: I1125 11:12:54.804293 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-h2wlq" podUID="993bc795-fde2-4876-9e76-e97a6095576d" containerName="registry-server" probeResult="failure" output=< Nov 25 11:12:54 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:12:54 crc kubenswrapper[4769]: > Nov 25 11:12:55 crc kubenswrapper[4769]: I1125 11:12:55.191703 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-c6g96" podUID="b831a4b0-2508-4624-b8cb-833a8d5b10ad" containerName="registry-server" probeResult="failure" output=< Nov 25 11:12:55 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:12:55 crc kubenswrapper[4769]: > Nov 25 11:12:56 crc kubenswrapper[4769]: I1125 11:12:56.240271 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9hv66" podUID="7e334767-99a9-4a84-bcd7-e3e53d56750a" containerName="registry-server" probeResult="failure" output=< Nov 25 11:12:56 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:12:56 crc kubenswrapper[4769]: > Nov 25 11:13:00 crc kubenswrapper[4769]: I1125 11:13:00.153426 4769 prober.go:107] "Probe failed" 
probeType="Startup" pod="openshift-marketplace/redhat-operators-9v54j" podUID="bdd2d26d-cc13-41dd-9e57-fd8e81c627fe" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:00 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:00 crc kubenswrapper[4769]: > Nov 25 11:13:00 crc kubenswrapper[4769]: I1125 11:13:00.237437 4769 scope.go:117] "RemoveContainer" containerID="4b939fc00d006c5c582da7cfcaed5ec1363b4453901fa7b7866cf6b4ff5b14f5" Nov 25 11:13:00 crc kubenswrapper[4769]: E1125 11:13:00.237834 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 11:13:00 crc kubenswrapper[4769]: I1125 11:13:00.937553 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gwvnd" podUID="e588820f-3563-4b98-9ba4-85da1cee2821" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:00 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:00 crc kubenswrapper[4769]: > Nov 25 11:13:01 crc kubenswrapper[4769]: I1125 11:13:01.028656 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gkx9w" podUID="f1b1fb95-30bd-457e-b59c-c06e6b770f42" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:01 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:01 crc kubenswrapper[4769]: > Nov 25 11:13:02 crc kubenswrapper[4769]: I1125 11:13:02.236133 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-856hd" podUID="4a6225f9-cdc5-486a-b813-db81a752fade" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:02 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:02 crc kubenswrapper[4769]: > Nov 25 11:13:02 crc kubenswrapper[4769]: I1125 11:13:02.732676 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-h57kt" podUID="af5c9899-c379-4d60-9aae-979a20de2ad2" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:02 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:02 crc kubenswrapper[4769]: > Nov 25 11:13:03 crc kubenswrapper[4769]: I1125 11:13:03.070796 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-ccknj" podUID="36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:03 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:03 crc kubenswrapper[4769]: > Nov 25 11:13:03 crc kubenswrapper[4769]: I1125 11:13:03.489694 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-vr22m" podUID="e9d04b78-f610-43ce-bc66-ca181acf6654" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:03 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:03 crc kubenswrapper[4769]: > Nov 25 11:13:04 crc kubenswrapper[4769]: I1125 11:13:04.833824 4769 
prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-pq2q8" podUID="71135c8c-e27a-4ae0-9e15-afe6164def89" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:04 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:04 crc kubenswrapper[4769]: > Nov 25 11:13:04 crc kubenswrapper[4769]: I1125 11:13:04.840592 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-h2wlq" podUID="993bc795-fde2-4876-9e76-e97a6095576d" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:04 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:04 crc kubenswrapper[4769]: > Nov 25 11:13:05 crc kubenswrapper[4769]: I1125 11:13:05.188224 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-c6g96" podUID="b831a4b0-2508-4624-b8cb-833a8d5b10ad" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:05 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:05 crc kubenswrapper[4769]: > Nov 25 11:13:06 crc kubenswrapper[4769]: I1125 11:13:06.241866 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9hv66" podUID="7e334767-99a9-4a84-bcd7-e3e53d56750a" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:06 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:06 crc kubenswrapper[4769]: > Nov 25 11:13:10 crc kubenswrapper[4769]: I1125 11:13:10.157706 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9v54j" podUID="bdd2d26d-cc13-41dd-9e57-fd8e81c627fe" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:10 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:10 crc kubenswrapper[4769]: > Nov 25 11:13:10 crc kubenswrapper[4769]: I1125 11:13:10.937540 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gwvnd" podUID="e588820f-3563-4b98-9ba4-85da1cee2821" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:10 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:10 crc kubenswrapper[4769]: > Nov 25 11:13:11 crc kubenswrapper[4769]: I1125 11:13:11.033204 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gkx9w" podUID="f1b1fb95-30bd-457e-b59c-c06e6b770f42" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:11 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:11 crc kubenswrapper[4769]: > Nov 25 11:13:12 crc kubenswrapper[4769]: I1125 11:13:12.235735 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-856hd" podUID="4a6225f9-cdc5-486a-b813-db81a752fade" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:12 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:12 crc kubenswrapper[4769]: > Nov 25 11:13:12 crc kubenswrapper[4769]: I1125 11:13:12.750525 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-h57kt" podUID="af5c9899-c379-4d60-9aae-979a20de2ad2" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:12 
Nov 25 11:13:12 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:13:12 crc kubenswrapper[4769]: >
Nov 25 11:13:13 crc kubenswrapper[4769]: I1125 11:13:13.065455 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-ccknj" podUID="36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:13:13 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:13:13 crc kubenswrapper[4769]: >
Nov 25 11:13:13 crc kubenswrapper[4769]: I1125 11:13:13.487947 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-vr22m" podUID="e9d04b78-f610-43ce-bc66-ca181acf6654" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:13:13 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:13:13 crc kubenswrapper[4769]: >
Nov 25 11:13:13 crc kubenswrapper[4769]: I1125 11:13:13.911809 4769 generic.go:334] "Generic (PLEG): container finished" podID="03f7a5db-5b5b-4107-ab58-1fc92bea67a1" containerID="9d6770ac412bfcf02ae85d65be770e22456a557e0627cfad10dba0492dac096c" exitCode=1
Nov 25 11:13:13 crc kubenswrapper[4769]: I1125 11:13:13.911854 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"03f7a5db-5b5b-4107-ab58-1fc92bea67a1","Type":"ContainerDied","Data":"9d6770ac412bfcf02ae85d65be770e22456a557e0627cfad10dba0492dac096c"}
Nov 25 11:13:14 crc kubenswrapper[4769]: I1125 11:13:14.214760 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-pq2q8" podUID="71135c8c-e27a-4ae0-9e15-afe6164def89" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:13:14 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:13:14 crc kubenswrapper[4769]: >
Nov 25 11:13:14 crc kubenswrapper[4769]: I1125 11:13:14.237397 4769 scope.go:117] "RemoveContainer" containerID="4b939fc00d006c5c582da7cfcaed5ec1363b4453901fa7b7866cf6b4ff5b14f5"
Nov 25 11:13:14 crc kubenswrapper[4769]: E1125 11:13:14.237997 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256"
Nov 25 11:13:14 crc kubenswrapper[4769]: I1125 11:13:14.628156 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-h2wlq" podUID="993bc795-fde2-4876-9e76-e97a6095576d" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:13:14 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:13:14 crc kubenswrapper[4769]: >
Nov 25 11:13:15 crc kubenswrapper[4769]: I1125 11:13:15.189877 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-c6g96" podUID="b831a4b0-2508-4624-b8cb-833a8d5b10ad" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:13:15 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:13:15 crc kubenswrapper[4769]: >
pod="openstack/tempest-tests-tempest" event={"ID":"03f7a5db-5b5b-4107-ab58-1fc92bea67a1","Type":"ContainerDied","Data":"ea1c4715a89b221ec2cb536fad6d08aa9e4d5173b32268c2f595124f70ec8aba"} Nov 25 11:13:15 crc kubenswrapper[4769]: I1125 11:13:15.937039 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ea1c4715a89b221ec2cb536fad6d08aa9e4d5173b32268c2f595124f70ec8aba" Nov 25 11:13:16 crc kubenswrapper[4769]: I1125 11:13:16.126357 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 25 11:13:16 crc kubenswrapper[4769]: I1125 11:13:16.235210 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-test-operator-ephemeral-workdir\") pod \"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\" (UID: \"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\") " Nov 25 11:13:16 crc kubenswrapper[4769]: I1125 11:13:16.235299 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-test-operator-ephemeral-temporary\") pod \"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\" (UID: \"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\") " Nov 25 11:13:16 crc kubenswrapper[4769]: I1125 11:13:16.235396 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-openstack-config-secret\") pod \"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\" (UID: \"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\") " Nov 25 11:13:16 crc kubenswrapper[4769]: I1125 11:13:16.235422 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\" (UID: \"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\") " Nov 25 11:13:16 crc kubenswrapper[4769]: I1125 11:13:16.235460 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gs7sr\" (UniqueName: \"kubernetes.io/projected/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-kube-api-access-gs7sr\") pod \"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\" (UID: \"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\") " Nov 25 11:13:16 crc kubenswrapper[4769]: I1125 11:13:16.235497 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-openstack-config\") pod \"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\" (UID: \"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\") " Nov 25 11:13:16 crc kubenswrapper[4769]: I1125 11:13:16.235603 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-config-data\") pod \"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\" (UID: \"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\") " Nov 25 11:13:16 crc kubenswrapper[4769]: I1125 11:13:16.235673 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-ssh-key\") pod \"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\" (UID: \"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\") " Nov 25 11:13:16 crc kubenswrapper[4769]: I1125 11:13:16.235763 4769 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-ca-certs\") pod \"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\" (UID: \"03f7a5db-5b5b-4107-ab58-1fc92bea67a1\") " Nov 25 11:13:16 crc kubenswrapper[4769]: I1125 11:13:16.236614 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-test-operator-ephemeral-temporary" (OuterVolumeSpecName: "test-operator-ephemeral-temporary") pod "03f7a5db-5b5b-4107-ab58-1fc92bea67a1" (UID: "03f7a5db-5b5b-4107-ab58-1fc92bea67a1"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:13:16 crc kubenswrapper[4769]: I1125 11:13:16.237271 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-config-data" (OuterVolumeSpecName: "config-data") pod "03f7a5db-5b5b-4107-ab58-1fc92bea67a1" (UID: "03f7a5db-5b5b-4107-ab58-1fc92bea67a1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 11:13:16 crc kubenswrapper[4769]: I1125 11:13:16.242557 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-kube-api-access-gs7sr" (OuterVolumeSpecName: "kube-api-access-gs7sr") pod "03f7a5db-5b5b-4107-ab58-1fc92bea67a1" (UID: "03f7a5db-5b5b-4107-ab58-1fc92bea67a1"). InnerVolumeSpecName "kube-api-access-gs7sr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:13:16 crc kubenswrapper[4769]: I1125 11:13:16.243039 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "test-operator-logs") pod "03f7a5db-5b5b-4107-ab58-1fc92bea67a1" (UID: "03f7a5db-5b5b-4107-ab58-1fc92bea67a1"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 11:13:16 crc kubenswrapper[4769]: I1125 11:13:16.243225 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "03f7a5db-5b5b-4107-ab58-1fc92bea67a1" (UID: "03f7a5db-5b5b-4107-ab58-1fc92bea67a1"). InnerVolumeSpecName "test-operator-ephemeral-workdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:13:16 crc kubenswrapper[4769]: I1125 11:13:16.272854 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "03f7a5db-5b5b-4107-ab58-1fc92bea67a1" (UID: "03f7a5db-5b5b-4107-ab58-1fc92bea67a1"). InnerVolumeSpecName "ca-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:13:16 crc kubenswrapper[4769]: I1125 11:13:16.273823 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "03f7a5db-5b5b-4107-ab58-1fc92bea67a1" (UID: "03f7a5db-5b5b-4107-ab58-1fc92bea67a1"). InnerVolumeSpecName "openstack-config-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:13:16 crc kubenswrapper[4769]: I1125 11:13:16.290803 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "03f7a5db-5b5b-4107-ab58-1fc92bea67a1" (UID: "03f7a5db-5b5b-4107-ab58-1fc92bea67a1"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:13:16 crc kubenswrapper[4769]: I1125 11:13:16.313432 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "03f7a5db-5b5b-4107-ab58-1fc92bea67a1" (UID: "03f7a5db-5b5b-4107-ab58-1fc92bea67a1"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 11:13:16 crc kubenswrapper[4769]: I1125 11:13:16.339657 4769 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\"" Nov 25 11:13:16 crc kubenswrapper[4769]: I1125 11:13:16.339691 4769 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 25 11:13:16 crc kubenswrapper[4769]: I1125 11:13:16.339712 4769 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Nov 25 11:13:16 crc kubenswrapper[4769]: I1125 11:13:16.339724 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gs7sr\" (UniqueName: \"kubernetes.io/projected/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-kube-api-access-gs7sr\") on node \"crc\" DevicePath \"\"" Nov 25 11:13:16 crc kubenswrapper[4769]: I1125 11:13:16.339733 4769 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 25 11:13:16 crc kubenswrapper[4769]: I1125 11:13:16.339741 4769 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 11:13:16 crc kubenswrapper[4769]: I1125 11:13:16.339750 4769 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 11:13:16 crc kubenswrapper[4769]: I1125 11:13:16.339757 4769 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-ca-certs\") on node \"crc\" DevicePath \"\"" Nov 25 11:13:16 crc kubenswrapper[4769]: I1125 11:13:16.339766 4769 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/03f7a5db-5b5b-4107-ab58-1fc92bea67a1-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\"" Nov 25 11:13:16 crc kubenswrapper[4769]: I1125 11:13:16.366775 4769 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: 
"kubernetes.io/local-volume/local-storage02-crc") on node "crc" Nov 25 11:13:16 crc kubenswrapper[4769]: I1125 11:13:16.441650 4769 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Nov 25 11:13:16 crc kubenswrapper[4769]: I1125 11:13:16.946137 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 25 11:13:17 crc kubenswrapper[4769]: I1125 11:13:17.043077 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9hv66" podUID="7e334767-99a9-4a84-bcd7-e3e53d56750a" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:17 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:17 crc kubenswrapper[4769]: > Nov 25 11:13:20 crc kubenswrapper[4769]: I1125 11:13:20.156960 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9v54j" podUID="bdd2d26d-cc13-41dd-9e57-fd8e81c627fe" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:20 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:20 crc kubenswrapper[4769]: > Nov 25 11:13:20 crc kubenswrapper[4769]: I1125 11:13:20.934738 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gwvnd" podUID="e588820f-3563-4b98-9ba4-85da1cee2821" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:20 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:20 crc kubenswrapper[4769]: > Nov 25 11:13:21 crc kubenswrapper[4769]: I1125 11:13:21.026938 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gkx9w" podUID="f1b1fb95-30bd-457e-b59c-c06e6b770f42" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:21 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:21 crc kubenswrapper[4769]: > Nov 25 11:13:22 crc kubenswrapper[4769]: I1125 11:13:22.244710 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-856hd" podUID="4a6225f9-cdc5-486a-b813-db81a752fade" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:22 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:22 crc kubenswrapper[4769]: > Nov 25 11:13:22 crc kubenswrapper[4769]: I1125 11:13:22.732096 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-h57kt" podUID="af5c9899-c379-4d60-9aae-979a20de2ad2" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:22 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:22 crc kubenswrapper[4769]: > Nov 25 11:13:23 crc kubenswrapper[4769]: I1125 11:13:23.067018 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-ccknj" podUID="36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:23 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:23 crc kubenswrapper[4769]: > Nov 25 11:13:23 crc kubenswrapper[4769]: I1125 11:13:23.485944 4769 prober.go:107] "Probe failed" probeType="Startup" 
pod="openshift-marketplace/redhat-operators-vr22m" podUID="e9d04b78-f610-43ce-bc66-ca181acf6654" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:23 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:23 crc kubenswrapper[4769]: > Nov 25 11:13:24 crc kubenswrapper[4769]: I1125 11:13:24.209566 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-pq2q8" podUID="71135c8c-e27a-4ae0-9e15-afe6164def89" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:24 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:24 crc kubenswrapper[4769]: > Nov 25 11:13:24 crc kubenswrapper[4769]: I1125 11:13:24.622093 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-h2wlq" podUID="993bc795-fde2-4876-9e76-e97a6095576d" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:24 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:24 crc kubenswrapper[4769]: > Nov 25 11:13:25 crc kubenswrapper[4769]: I1125 11:13:25.190437 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-c6g96" podUID="b831a4b0-2508-4624-b8cb-833a8d5b10ad" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:25 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:25 crc kubenswrapper[4769]: > Nov 25 11:13:26 crc kubenswrapper[4769]: I1125 11:13:26.238752 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9hv66" podUID="7e334767-99a9-4a84-bcd7-e3e53d56750a" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:26 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:26 crc kubenswrapper[4769]: > Nov 25 11:13:27 crc kubenswrapper[4769]: I1125 11:13:27.015617 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 25 11:13:27 crc kubenswrapper[4769]: E1125 11:13:27.025149 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db9dc5e0-3558-4ce3-bc87-611a67d7e949" containerName="registry-server" Nov 25 11:13:27 crc kubenswrapper[4769]: I1125 11:13:27.025180 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="db9dc5e0-3558-4ce3-bc87-611a67d7e949" containerName="registry-server" Nov 25 11:13:27 crc kubenswrapper[4769]: E1125 11:13:27.025204 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db9dc5e0-3558-4ce3-bc87-611a67d7e949" containerName="extract-content" Nov 25 11:13:27 crc kubenswrapper[4769]: I1125 11:13:27.025211 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="db9dc5e0-3558-4ce3-bc87-611a67d7e949" containerName="extract-content" Nov 25 11:13:27 crc kubenswrapper[4769]: E1125 11:13:27.025226 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db9dc5e0-3558-4ce3-bc87-611a67d7e949" containerName="extract-utilities" Nov 25 11:13:27 crc kubenswrapper[4769]: I1125 11:13:27.025239 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="db9dc5e0-3558-4ce3-bc87-611a67d7e949" containerName="extract-utilities" Nov 25 11:13:27 crc kubenswrapper[4769]: E1125 11:13:27.025468 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03f7a5db-5b5b-4107-ab58-1fc92bea67a1" 
containerName="tempest-tests-tempest-tests-runner" Nov 25 11:13:27 crc kubenswrapper[4769]: I1125 11:13:27.025476 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="03f7a5db-5b5b-4107-ab58-1fc92bea67a1" containerName="tempest-tests-tempest-tests-runner" Nov 25 11:13:27 crc kubenswrapper[4769]: I1125 11:13:27.026288 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="db9dc5e0-3558-4ce3-bc87-611a67d7e949" containerName="registry-server" Nov 25 11:13:27 crc kubenswrapper[4769]: I1125 11:13:27.026382 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="03f7a5db-5b5b-4107-ab58-1fc92bea67a1" containerName="tempest-tests-tempest-tests-runner" Nov 25 11:13:27 crc kubenswrapper[4769]: I1125 11:13:27.037513 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 25 11:13:27 crc kubenswrapper[4769]: I1125 11:13:27.073087 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-vc5tq" Nov 25 11:13:27 crc kubenswrapper[4769]: I1125 11:13:27.143016 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"138fc3df-1668-45d3-84d4-ccd0743bbcdf\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 25 11:13:27 crc kubenswrapper[4769]: I1125 11:13:27.143512 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9w78z\" (UniqueName: \"kubernetes.io/projected/138fc3df-1668-45d3-84d4-ccd0743bbcdf-kube-api-access-9w78z\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"138fc3df-1668-45d3-84d4-ccd0743bbcdf\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 25 11:13:27 crc kubenswrapper[4769]: I1125 11:13:27.175837 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 25 11:13:27 crc kubenswrapper[4769]: I1125 11:13:27.246712 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"138fc3df-1668-45d3-84d4-ccd0743bbcdf\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 25 11:13:27 crc kubenswrapper[4769]: I1125 11:13:27.247032 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9w78z\" (UniqueName: \"kubernetes.io/projected/138fc3df-1668-45d3-84d4-ccd0743bbcdf-kube-api-access-9w78z\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"138fc3df-1668-45d3-84d4-ccd0743bbcdf\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 25 11:13:27 crc kubenswrapper[4769]: I1125 11:13:27.265852 4769 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"138fc3df-1668-45d3-84d4-ccd0743bbcdf\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 25 11:13:27 crc kubenswrapper[4769]: I1125 11:13:27.394873 4769 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9w78z\" (UniqueName: \"kubernetes.io/projected/138fc3df-1668-45d3-84d4-ccd0743bbcdf-kube-api-access-9w78z\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"138fc3df-1668-45d3-84d4-ccd0743bbcdf\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 25 11:13:27 crc kubenswrapper[4769]: I1125 11:13:27.396412 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"138fc3df-1668-45d3-84d4-ccd0743bbcdf\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 25 11:13:27 crc kubenswrapper[4769]: I1125 11:13:27.679882 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 25 11:13:28 crc kubenswrapper[4769]: I1125 11:13:28.237858 4769 scope.go:117] "RemoveContainer" containerID="4b939fc00d006c5c582da7cfcaed5ec1363b4453901fa7b7866cf6b4ff5b14f5" Nov 25 11:13:28 crc kubenswrapper[4769]: E1125 11:13:28.238634 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 11:13:28 crc kubenswrapper[4769]: I1125 11:13:28.508109 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 25 11:13:29 crc kubenswrapper[4769]: I1125 11:13:29.093406 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"138fc3df-1668-45d3-84d4-ccd0743bbcdf","Type":"ContainerStarted","Data":"04e474a8e8da49a022eeab714b958d2d9615dcb2231a476201dbdaf6c1290ae9"} Nov 25 11:13:30 crc kubenswrapper[4769]: I1125 11:13:30.170831 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9v54j" podUID="bdd2d26d-cc13-41dd-9e57-fd8e81c627fe" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:30 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:30 crc kubenswrapper[4769]: > Nov 25 11:13:30 crc kubenswrapper[4769]: I1125 11:13:30.950057 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gwvnd" podUID="e588820f-3563-4b98-9ba4-85da1cee2821" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:30 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:30 crc kubenswrapper[4769]: > Nov 25 11:13:31 crc kubenswrapper[4769]: I1125 11:13:31.018625 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gkx9w" podUID="f1b1fb95-30bd-457e-b59c-c06e6b770f42" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:31 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:31 crc kubenswrapper[4769]: > Nov 25 11:13:31 crc kubenswrapper[4769]: I1125 11:13:31.115748 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
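The mount of local-storage02-crc for the test-operator-logs pod above happens in two phases: MountVolume.MountDevice attaches the PV at its global device mount path (/mnt/openstack/pv02, from the log), and MountVolume.SetUp then exposes it inside the pod's volumes directory. A tiny sketch of where the two phases land on disk; the per-pod path layout is assumed from standard kubelet conventions, not printed in this log.

// Sketch: the two mount-phase locations for the local PV above
// (pod path layout assumed from kubelet's usual directory scheme).
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	deviceMountPath := "/mnt/openstack/pv02" // from the MountVolume.MountDevice entry
	podUID := "138fc3df-1668-45d3-84d4-ccd0743bbcdf"
	podPath := filepath.Join("/var/lib/kubelet/pods", podUID,
		"volumes/kubernetes.io~local-volume", "local-storage02-crc")
	fmt.Println(deviceMountPath, "->", podPath)
}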
pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"138fc3df-1668-45d3-84d4-ccd0743bbcdf","Type":"ContainerStarted","Data":"3069c2c265cade26a387dd3854189b4347966c5d1a74797ae6b4760d66b9f6c4"} Nov 25 11:13:32 crc kubenswrapper[4769]: I1125 11:13:32.243040 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-856hd" podUID="4a6225f9-cdc5-486a-b813-db81a752fade" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:32 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:32 crc kubenswrapper[4769]: > Nov 25 11:13:32 crc kubenswrapper[4769]: I1125 11:13:32.738700 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-h57kt" podUID="af5c9899-c379-4d60-9aae-979a20de2ad2" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:32 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:32 crc kubenswrapper[4769]: > Nov 25 11:13:33 crc kubenswrapper[4769]: I1125 11:13:33.063719 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-ccknj" podUID="36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:33 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:33 crc kubenswrapper[4769]: > Nov 25 11:13:33 crc kubenswrapper[4769]: I1125 11:13:33.481858 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-vr22m" podUID="e9d04b78-f610-43ce-bc66-ca181acf6654" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:33 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:33 crc kubenswrapper[4769]: > Nov 25 11:13:34 crc kubenswrapper[4769]: I1125 11:13:34.207819 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-pq2q8" podUID="71135c8c-e27a-4ae0-9e15-afe6164def89" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:34 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:34 crc kubenswrapper[4769]: > Nov 25 11:13:34 crc kubenswrapper[4769]: I1125 11:13:34.630560 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-h2wlq" podUID="993bc795-fde2-4876-9e76-e97a6095576d" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:34 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:34 crc kubenswrapper[4769]: > Nov 25 11:13:35 crc kubenswrapper[4769]: I1125 11:13:35.181089 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-c6g96" podUID="b831a4b0-2508-4624-b8cb-833a8d5b10ad" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:35 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:35 crc kubenswrapper[4769]: > Nov 25 11:13:36 crc kubenswrapper[4769]: I1125 11:13:36.245703 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9hv66" podUID="7e334767-99a9-4a84-bcd7-e3e53d56750a" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:36 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:36 crc kubenswrapper[4769]: > Nov 
25 11:13:39 crc kubenswrapper[4769]: I1125 11:13:39.238607 4769 scope.go:117] "RemoveContainer" containerID="4b939fc00d006c5c582da7cfcaed5ec1363b4453901fa7b7866cf6b4ff5b14f5" Nov 25 11:13:39 crc kubenswrapper[4769]: E1125 11:13:39.239525 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 11:13:40 crc kubenswrapper[4769]: I1125 11:13:40.152807 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9v54j" podUID="bdd2d26d-cc13-41dd-9e57-fd8e81c627fe" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:40 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:40 crc kubenswrapper[4769]: > Nov 25 11:13:40 crc kubenswrapper[4769]: I1125 11:13:40.939926 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gwvnd" podUID="e588820f-3563-4b98-9ba4-85da1cee2821" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:40 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:40 crc kubenswrapper[4769]: > Nov 25 11:13:41 crc kubenswrapper[4769]: I1125 11:13:41.017460 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gkx9w" podUID="f1b1fb95-30bd-457e-b59c-c06e6b770f42" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:41 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:41 crc kubenswrapper[4769]: > Nov 25 11:13:42 crc kubenswrapper[4769]: I1125 11:13:42.244980 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-856hd" podUID="4a6225f9-cdc5-486a-b813-db81a752fade" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:42 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:42 crc kubenswrapper[4769]: > Nov 25 11:13:42 crc kubenswrapper[4769]: I1125 11:13:42.753926 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-h57kt" podUID="af5c9899-c379-4d60-9aae-979a20de2ad2" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:42 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:42 crc kubenswrapper[4769]: > Nov 25 11:13:43 crc kubenswrapper[4769]: I1125 11:13:43.065702 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-ccknj" podUID="36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:43 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:43 crc kubenswrapper[4769]: > Nov 25 11:13:43 crc kubenswrapper[4769]: I1125 11:13:43.486573 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-vr22m" podUID="e9d04b78-f610-43ce-bc66-ca181acf6654" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:43 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:43 
crc kubenswrapper[4769]: > Nov 25 11:13:44 crc kubenswrapper[4769]: I1125 11:13:44.208724 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-pq2q8" podUID="71135c8c-e27a-4ae0-9e15-afe6164def89" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:44 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:44 crc kubenswrapper[4769]: > Nov 25 11:13:44 crc kubenswrapper[4769]: I1125 11:13:44.628281 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-h2wlq" podUID="993bc795-fde2-4876-9e76-e97a6095576d" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:44 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:44 crc kubenswrapper[4769]: > Nov 25 11:13:45 crc kubenswrapper[4769]: I1125 11:13:45.183235 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-c6g96" podUID="b831a4b0-2508-4624-b8cb-833a8d5b10ad" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:45 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:45 crc kubenswrapper[4769]: > Nov 25 11:13:46 crc kubenswrapper[4769]: I1125 11:13:46.236062 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9hv66" podUID="7e334767-99a9-4a84-bcd7-e3e53d56750a" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:46 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:46 crc kubenswrapper[4769]: > Nov 25 11:13:49 crc kubenswrapper[4769]: I1125 11:13:49.197354 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-9v54j" Nov 25 11:13:49 crc kubenswrapper[4769]: I1125 11:13:49.239612 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" podStartSLOduration=21.855739371 podStartE2EDuration="23.238676547s" podCreationTimestamp="2025-11-25 11:13:26 +0000 UTC" firstStartedPulling="2025-11-25 11:13:28.611293891 +0000 UTC m=+5357.196266204" lastFinishedPulling="2025-11-25 11:13:29.994231067 +0000 UTC m=+5358.579203380" observedRunningTime="2025-11-25 11:13:31.132111492 +0000 UTC m=+5359.717083815" watchObservedRunningTime="2025-11-25 11:13:49.238676547 +0000 UTC m=+5377.823648860" Nov 25 11:13:49 crc kubenswrapper[4769]: I1125 11:13:49.263298 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-9v54j" Nov 25 11:13:50 crc kubenswrapper[4769]: I1125 11:13:50.017828 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-gkx9w" Nov 25 11:13:50 crc kubenswrapper[4769]: I1125 11:13:50.084561 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-gkx9w" Nov 25 11:13:50 crc kubenswrapper[4769]: I1125 11:13:50.941330 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gwvnd" podUID="e588820f-3563-4b98-9ba4-85da1cee2821" containerName="registry-server" probeResult="failure" output=< Nov 25 11:13:50 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:13:50 crc kubenswrapper[4769]: > Nov 25 11:13:51 crc kubenswrapper[4769]: 
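The "Observed pod startup duration" entry above encodes a small calculation worth making explicit: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp (11:13:49.238676547 - 11:13:26 = 23.238676547s), and podStartSLOduration additionally excludes the image-pull window (lastFinishedPulling - firstStartedPulling = 1.382937176s), giving 21.855739371s. The sketch below reproduces the arithmetic with the timestamps copied from the log:

// Reproduces the arithmetic in the "Observed pod startup duration" entry.
package main

import (
	"fmt"
	"time"
)

func mustParse(s string) time.Time {
	t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
	if err != nil {
		panic(err)
	}
	return t
}

func main() {
	created := mustParse("2025-11-25 11:13:26 +0000 UTC")           // podCreationTimestamp
	firstPull := mustParse("2025-11-25 11:13:28.611293891 +0000 UTC") // firstStartedPulling
	lastPull := mustParse("2025-11-25 11:13:29.994231067 +0000 UTC")  // lastFinishedPulling
	running := mustParse("2025-11-25 11:13:49.238676547 +0000 UTC")   // watchObservedRunningTime

	e2e := running.Sub(created)          // 23.238676547s = podStartE2EDuration
	slo := e2e - lastPull.Sub(firstPull) // minus 1.382937176s of image pulling
	fmt.Println(e2e, slo)                // prints 23.238676547s 21.855739371s
}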
Nov 25 11:13:51 crc kubenswrapper[4769]: I1125 11:13:51.237734 4769 scope.go:117] "RemoveContainer" containerID="4b939fc00d006c5c582da7cfcaed5ec1363b4453901fa7b7866cf6b4ff5b14f5"
Nov 25 11:13:51 crc kubenswrapper[4769]: E1125 11:13:51.238446 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256"
Nov 25 11:13:51 crc kubenswrapper[4769]: I1125 11:13:51.245981 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-856hd"
Nov 25 11:13:51 crc kubenswrapper[4769]: I1125 11:13:51.350984 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-856hd"
Nov 25 11:13:52 crc kubenswrapper[4769]: I1125 11:13:52.733252 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-h57kt" podUID="af5c9899-c379-4d60-9aae-979a20de2ad2" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:13:52 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:13:52 crc kubenswrapper[4769]: >
Nov 25 11:13:53 crc kubenswrapper[4769]: I1125 11:13:53.070204 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-ccknj" podUID="36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:13:53 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:13:53 crc kubenswrapper[4769]: >
Nov 25 11:13:53 crc kubenswrapper[4769]: I1125 11:13:53.498688 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-vr22m" podUID="e9d04b78-f610-43ce-bc66-ca181acf6654" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:13:53 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:13:53 crc kubenswrapper[4769]: >
Nov 25 11:13:54 crc kubenswrapper[4769]: I1125 11:13:54.184193 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-c6g96"
Nov 25 11:13:54 crc kubenswrapper[4769]: I1125 11:13:54.203442 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-pq2q8" podUID="71135c8c-e27a-4ae0-9e15-afe6164def89" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:13:54 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:13:54 crc kubenswrapper[4769]: >
Nov 25 11:13:54 crc kubenswrapper[4769]: I1125 11:13:54.259213 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-c6g96"
Nov 25 11:13:54 crc kubenswrapper[4769]: I1125 11:13:54.627061 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-h2wlq" podUID="993bc795-fde2-4876-9e76-e97a6095576d" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:13:54 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:13:54 crc kubenswrapper[4769]: >
Nov 25 11:13:56 crc kubenswrapper[4769]: I1125 11:13:56.235325 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9hv66" podUID="7e334767-99a9-4a84-bcd7-e3e53d56750a" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:13:56 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:13:56 crc kubenswrapper[4769]: >
Nov 25 11:13:59 crc kubenswrapper[4769]: I1125 11:13:59.939688 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-gwvnd"
Nov 25 11:13:59 crc kubenswrapper[4769]: I1125 11:13:59.997564 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-gwvnd"
Nov 25 11:14:01 crc kubenswrapper[4769]: I1125 11:14:01.765428 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-h57kt"
Nov 25 11:14:01 crc kubenswrapper[4769]: I1125 11:14:01.834229 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-h57kt"
Nov 25 11:14:02 crc kubenswrapper[4769]: I1125 11:14:02.079722 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-ccknj"
Nov 25 11:14:02 crc kubenswrapper[4769]: I1125 11:14:02.173356 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-ccknj"
Nov 25 11:14:02 crc kubenswrapper[4769]: I1125 11:14:02.503900 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-vr22m"
Nov 25 11:14:02 crc kubenswrapper[4769]: I1125 11:14:02.556146 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-vr22m"
Nov 25 11:14:03 crc kubenswrapper[4769]: I1125 11:14:03.211767 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-pq2q8"
Nov 25 11:14:03 crc kubenswrapper[4769]: I1125 11:14:03.272109 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-pq2q8"
Nov 25 11:14:03 crc kubenswrapper[4769]: I1125 11:14:03.635658 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-h2wlq"
Nov 25 11:14:03 crc kubenswrapper[4769]: I1125 11:14:03.695007 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-h2wlq"
Nov 25 11:14:05 crc kubenswrapper[4769]: I1125 11:14:05.256467 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-9hv66"
Nov 25 11:14:05 crc kubenswrapper[4769]: I1125 11:14:05.317450 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-9hv66"
Nov 25 11:14:06 crc kubenswrapper[4769]: I1125 11:14:06.191582 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-856hd"]
Nov 25 11:14:06 crc kubenswrapper[4769]: I1125 11:14:06.194777 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-856hd" podUID="4a6225f9-cdc5-486a-b813-db81a752fade" containerName="registry-server" containerID="cri-o://7bbb8e9e5221e6b1e6bd2d68d5535139e6efe50a4d3ba16c44a019da0fb5c1db" gracePeriod=2
Nov 25 11:14:06 crc kubenswrapper[4769]: I1125 11:14:06.237270 4769 scope.go:117] "RemoveContainer" containerID="4b939fc00d006c5c582da7cfcaed5ec1363b4453901fa7b7866cf6b4ff5b14f5"
Nov 25 11:14:06 crc kubenswrapper[4769]: E1125 11:14:06.237773 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256"
Nov 25 11:14:06 crc kubenswrapper[4769]: I1125 11:14:06.384156 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ccknj"]
Nov 25 11:14:06 crc kubenswrapper[4769]: I1125 11:14:06.385829 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-ccknj" podUID="36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24" containerName="registry-server" containerID="cri-o://2ce9161590efb0cbf9f27ae0ba8ac861110a0de8d2a551e1df1028d917874094" gracePeriod=2
Nov 25 11:14:06 crc kubenswrapper[4769]: I1125 11:14:06.582102 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gkx9w"]
Nov 25 11:14:06 crc kubenswrapper[4769]: I1125 11:14:06.582362 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-gkx9w" podUID="f1b1fb95-30bd-457e-b59c-c06e6b770f42" containerName="registry-server" containerID="cri-o://97d3e2f5500f546826a966cbcf97eda09bab5c7837cf4b791ecb7db4bc235c6a" gracePeriod=2
Nov 25 11:14:06 crc kubenswrapper[4769]: I1125 11:14:06.595344 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-ccknj_36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24/registry-server/0.log"
Nov 25 11:14:06 crc kubenswrapper[4769]: I1125 11:14:06.596429 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ccknj" event={"ID":"36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24","Type":"ContainerDied","Data":"2ce9161590efb0cbf9f27ae0ba8ac861110a0de8d2a551e1df1028d917874094"}
Nov 25 11:14:06 crc kubenswrapper[4769]: I1125 11:14:06.596497 4769 scope.go:117] "RemoveContainer" containerID="06b1191036f240719dbe9a83b3904364f6e7209c7ca666c6105627d6be466272"
Nov 25 11:14:06 crc kubenswrapper[4769]: I1125 11:14:06.596246 4769 generic.go:334] "Generic (PLEG): container finished" podID="36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24" containerID="2ce9161590efb0cbf9f27ae0ba8ac861110a0de8d2a551e1df1028d917874094" exitCode=0
Nov 25 11:14:06 crc kubenswrapper[4769]: I1125 11:14:06.600876 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-856hd_4a6225f9-cdc5-486a-b813-db81a752fade/registry-server/0.log"
Nov 25 11:14:06 crc kubenswrapper[4769]: I1125 11:14:06.608523 4769 generic.go:334] "Generic (PLEG): container finished" podID="4a6225f9-cdc5-486a-b813-db81a752fade" containerID="7bbb8e9e5221e6b1e6bd2d68d5535139e6efe50a4d3ba16c44a019da0fb5c1db" exitCode=0
Nov 25 11:14:06 crc kubenswrapper[4769]: I1125 11:14:06.608573 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-856hd" event={"ID":"4a6225f9-cdc5-486a-b813-db81a752fade","Type":"ContainerDied","Data":"7bbb8e9e5221e6b1e6bd2d68d5535139e6efe50a4d3ba16c44a019da0fb5c1db"}
Nov 25 11:14:06 crc kubenswrapper[4769]: I1125 11:14:06.672838 4769 scope.go:117] "RemoveContainer" containerID="c11092b5f3f5b634a49406a81945567aba55f7e8eba28dfd0eb51eb84b1e6c5b"
Nov 25 11:14:06 crc kubenswrapper[4769]: I1125 11:14:06.788044 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gwvnd"]
Nov 25 11:14:06 crc kubenswrapper[4769]: I1125 11:14:06.788306 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-gwvnd" podUID="e588820f-3563-4b98-9ba4-85da1cee2821" containerName="registry-server" containerID="cri-o://a1568be57c7beab61bb0a6b097832669595590c8290f7842f32a3054807d0107" gracePeriod=2
Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:06.994229 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-h2wlq"]
Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:06.994850 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-h2wlq" podUID="993bc795-fde2-4876-9e76-e97a6095576d" containerName="registry-server" containerID="cri-o://c321d22a1ae6fa2b8d2be1a746c67da60ad4d81030707dc7e25df63a208f8830" gracePeriod=2
Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.207363 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-pq2q8"]
Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.207578 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-pq2q8" podUID="71135c8c-e27a-4ae0-9e15-afe6164def89" containerName="registry-server" containerID="cri-o://5ee4eb2cabb4bfb20c07a1ac10dc8a00f3e727297791d718cb56b3962af6fdd1" gracePeriod=2
Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.228136 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ccknj"
Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.278057 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24-catalog-content\") pod \"36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24\" (UID: \"36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24\") "
Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.278691 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mfkjl\" (UniqueName: \"kubernetes.io/projected/36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24-kube-api-access-mfkjl\") pod \"36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24\" (UID: \"36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24\") "
Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.278894 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24-utilities\") pod \"36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24\" (UID: \"36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24\") "
Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.280438 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24-utilities" (OuterVolumeSpecName: "utilities") pod "36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24" (UID: "36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.298920 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24-kube-api-access-mfkjl" (OuterVolumeSpecName: "kube-api-access-mfkjl") pod "36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24" (UID: "36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24"). InnerVolumeSpecName "kube-api-access-mfkjl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.382345 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mfkjl\" (UniqueName: \"kubernetes.io/projected/36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24-kube-api-access-mfkjl\") on node \"crc\" DevicePath \"\""
Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.382380 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.390190 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-h57kt"]
Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.390557 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-h57kt" podUID="af5c9899-c379-4d60-9aae-979a20de2ad2" containerName="registry-server" containerID="cri-o://86b51cfdab8c3140e15a460449dbe43cf35a1d4a2038624f852243daba5403fc" gracePeriod=2
Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.412733 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24" (UID: "36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.488532 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.619776 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vr22m"] Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.620010 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-vr22m" podUID="e9d04b78-f610-43ce-bc66-ca181acf6654" containerName="registry-server" containerID="cri-o://831425ff1c6b6beac4d23fee8cd8d66b7eb231b9f1666130855ae0c44e49fea5" gracePeriod=2 Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.636158 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-h57kt_af5c9899-c379-4d60-9aae-979a20de2ad2/registry-server/0.log" Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.638682 4769 generic.go:334] "Generic (PLEG): container finished" podID="af5c9899-c379-4d60-9aae-979a20de2ad2" containerID="86b51cfdab8c3140e15a460449dbe43cf35a1d4a2038624f852243daba5403fc" exitCode=0 Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.638732 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h57kt" event={"ID":"af5c9899-c379-4d60-9aae-979a20de2ad2","Type":"ContainerDied","Data":"86b51cfdab8c3140e15a460449dbe43cf35a1d4a2038624f852243daba5403fc"} Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.638897 4769 scope.go:117] "RemoveContainer" containerID="4ba6f479383af1853419c0467f8b024327a29a7aa073259cad5c62f4e347f088" Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.645681 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ccknj" event={"ID":"36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24","Type":"ContainerDied","Data":"a31d9bf85cff2449d94c5ae5b16529c7774f8327c9476b36e351b416801bd6d6"} Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.645784 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-ccknj" Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.654759 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-pq2q8_71135c8c-e27a-4ae0-9e15-afe6164def89/registry-server/0.log" Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.655923 4769 generic.go:334] "Generic (PLEG): container finished" podID="71135c8c-e27a-4ae0-9e15-afe6164def89" containerID="5ee4eb2cabb4bfb20c07a1ac10dc8a00f3e727297791d718cb56b3962af6fdd1" exitCode=0 Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.656016 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pq2q8" event={"ID":"71135c8c-e27a-4ae0-9e15-afe6164def89","Type":"ContainerDied","Data":"5ee4eb2cabb4bfb20c07a1ac10dc8a00f3e727297791d718cb56b3962af6fdd1"} Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.663661 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-gwvnd_e588820f-3563-4b98-9ba4-85da1cee2821/registry-server/0.log" Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.664784 4769 generic.go:334] "Generic (PLEG): container finished" podID="e588820f-3563-4b98-9ba4-85da1cee2821" containerID="a1568be57c7beab61bb0a6b097832669595590c8290f7842f32a3054807d0107" exitCode=0 Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.664874 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gwvnd" event={"ID":"e588820f-3563-4b98-9ba4-85da1cee2821","Type":"ContainerDied","Data":"a1568be57c7beab61bb0a6b097832669595590c8290f7842f32a3054807d0107"} Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.669377 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-h2wlq_993bc795-fde2-4876-9e76-e97a6095576d/registry-server/0.log" Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.671024 4769 generic.go:334] "Generic (PLEG): container finished" podID="993bc795-fde2-4876-9e76-e97a6095576d" containerID="c321d22a1ae6fa2b8d2be1a746c67da60ad4d81030707dc7e25df63a208f8830" exitCode=0 Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.671155 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h2wlq" event={"ID":"993bc795-fde2-4876-9e76-e97a6095576d","Type":"ContainerDied","Data":"c321d22a1ae6fa2b8d2be1a746c67da60ad4d81030707dc7e25df63a208f8830"} Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.671384 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h2wlq" event={"ID":"993bc795-fde2-4876-9e76-e97a6095576d","Type":"ContainerDied","Data":"bd331a544245279f04df5134a181d238a472359c28a0f098c0e6628a19905626"} Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.672648 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bd331a544245279f04df5134a181d238a472359c28a0f098c0e6628a19905626" Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.677322 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-gkx9w_f1b1fb95-30bd-457e-b59c-c06e6b770f42/registry-server/0.log" Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.680705 4769 generic.go:334] "Generic (PLEG): container finished" podID="f1b1fb95-30bd-457e-b59c-c06e6b770f42" containerID="97d3e2f5500f546826a966cbcf97eda09bab5c7837cf4b791ecb7db4bc235c6a" exitCode=0 Nov 25 11:14:07 crc 
kubenswrapper[4769]: I1125 11:14:07.680794 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gkx9w" event={"ID":"f1b1fb95-30bd-457e-b59c-c06e6b770f42","Type":"ContainerDied","Data":"97d3e2f5500f546826a966cbcf97eda09bab5c7837cf4b791ecb7db4bc235c6a"} Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.680867 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gkx9w" event={"ID":"f1b1fb95-30bd-457e-b59c-c06e6b770f42","Type":"ContainerDied","Data":"296ceb0fb976757174aefb38081c1f4e2cb575e1e88ee597c305471bc06acd1b"} Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.680897 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="296ceb0fb976757174aefb38081c1f4e2cb575e1e88ee597c305471bc06acd1b" Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.684304 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-856hd" event={"ID":"4a6225f9-cdc5-486a-b813-db81a752fade","Type":"ContainerDied","Data":"a2daff87d522bc692ed1663f2f67614d72e59b59571615f73ee110f46c5f9a01"} Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.684464 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a2daff87d522bc692ed1663f2f67614d72e59b59571615f73ee110f46c5f9a01" Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.787100 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-c6g96"] Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.787356 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-c6g96" podUID="b831a4b0-2508-4624-b8cb-833a8d5b10ad" containerName="registry-server" containerID="cri-o://9ea9ed614dd61aaaf48b170269a23e7f0b42ad623e6fd428f95b28435e92234d" gracePeriod=2 Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.819153 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-856hd" Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.836955 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-gkx9w_f1b1fb95-30bd-457e-b59c-c06e6b770f42/registry-server/0.log" Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.838098 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gkx9w" Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.845764 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ccknj"] Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.850230 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-h2wlq_993bc795-fde2-4876-9e76-e97a6095576d/registry-server/0.log" Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.852868 4769 scope.go:117] "RemoveContainer" containerID="2ce9161590efb0cbf9f27ae0ba8ac861110a0de8d2a551e1df1028d917874094" Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.853012 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-h2wlq" Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.864160 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-ccknj"] Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.869711 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-gwvnd_e588820f-3563-4b98-9ba4-85da1cee2821/registry-server/0.log" Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.871452 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gwvnd" Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.900210 4769 scope.go:117] "RemoveContainer" containerID="151df6f7f41c3828ab34928d0805938c6a65452bacc249ceaa2784140a1136a7" Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.900776 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/993bc795-fde2-4876-9e76-e97a6095576d-catalog-content\") pod \"993bc795-fde2-4876-9e76-e97a6095576d\" (UID: \"993bc795-fde2-4876-9e76-e97a6095576d\") " Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.900816 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/993bc795-fde2-4876-9e76-e97a6095576d-utilities\") pod \"993bc795-fde2-4876-9e76-e97a6095576d\" (UID: \"993bc795-fde2-4876-9e76-e97a6095576d\") " Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.900858 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-85t7t\" (UniqueName: \"kubernetes.io/projected/993bc795-fde2-4876-9e76-e97a6095576d-kube-api-access-85t7t\") pod \"993bc795-fde2-4876-9e76-e97a6095576d\" (UID: \"993bc795-fde2-4876-9e76-e97a6095576d\") " Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.900890 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lw9wf\" (UniqueName: \"kubernetes.io/projected/f1b1fb95-30bd-457e-b59c-c06e6b770f42-kube-api-access-lw9wf\") pod \"f1b1fb95-30bd-457e-b59c-c06e6b770f42\" (UID: \"f1b1fb95-30bd-457e-b59c-c06e6b770f42\") " Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.900922 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a6225f9-cdc5-486a-b813-db81a752fade-utilities\") pod \"4a6225f9-cdc5-486a-b813-db81a752fade\" (UID: \"4a6225f9-cdc5-486a-b813-db81a752fade\") " Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.904163 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4a6225f9-cdc5-486a-b813-db81a752fade-utilities" (OuterVolumeSpecName: "utilities") pod "4a6225f9-cdc5-486a-b813-db81a752fade" (UID: "4a6225f9-cdc5-486a-b813-db81a752fade"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.908382 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q79c4\" (UniqueName: \"kubernetes.io/projected/4a6225f9-cdc5-486a-b813-db81a752fade-kube-api-access-q79c4\") pod \"4a6225f9-cdc5-486a-b813-db81a752fade\" (UID: \"4a6225f9-cdc5-486a-b813-db81a752fade\") " Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.908464 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a6225f9-cdc5-486a-b813-db81a752fade-catalog-content\") pod \"4a6225f9-cdc5-486a-b813-db81a752fade\" (UID: \"4a6225f9-cdc5-486a-b813-db81a752fade\") " Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.908536 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f1b1fb95-30bd-457e-b59c-c06e6b770f42-catalog-content\") pod \"f1b1fb95-30bd-457e-b59c-c06e6b770f42\" (UID: \"f1b1fb95-30bd-457e-b59c-c06e6b770f42\") " Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.908605 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f1b1fb95-30bd-457e-b59c-c06e6b770f42-utilities\") pod \"f1b1fb95-30bd-457e-b59c-c06e6b770f42\" (UID: \"f1b1fb95-30bd-457e-b59c-c06e6b770f42\") " Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.909744 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/993bc795-fde2-4876-9e76-e97a6095576d-kube-api-access-85t7t" (OuterVolumeSpecName: "kube-api-access-85t7t") pod "993bc795-fde2-4876-9e76-e97a6095576d" (UID: "993bc795-fde2-4876-9e76-e97a6095576d"). InnerVolumeSpecName "kube-api-access-85t7t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.912138 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f1b1fb95-30bd-457e-b59c-c06e6b770f42-utilities" (OuterVolumeSpecName: "utilities") pod "f1b1fb95-30bd-457e-b59c-c06e6b770f42" (UID: "f1b1fb95-30bd-457e-b59c-c06e6b770f42"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.913825 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f1b1fb95-30bd-457e-b59c-c06e6b770f42-kube-api-access-lw9wf" (OuterVolumeSpecName: "kube-api-access-lw9wf") pod "f1b1fb95-30bd-457e-b59c-c06e6b770f42" (UID: "f1b1fb95-30bd-457e-b59c-c06e6b770f42"). InnerVolumeSpecName "kube-api-access-lw9wf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.914064 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4a6225f9-cdc5-486a-b813-db81a752fade-kube-api-access-q79c4" (OuterVolumeSpecName: "kube-api-access-q79c4") pod "4a6225f9-cdc5-486a-b813-db81a752fade" (UID: "4a6225f9-cdc5-486a-b813-db81a752fade"). InnerVolumeSpecName "kube-api-access-q79c4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.921908 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/993bc795-fde2-4876-9e76-e97a6095576d-utilities" (OuterVolumeSpecName: "utilities") pod "993bc795-fde2-4876-9e76-e97a6095576d" (UID: "993bc795-fde2-4876-9e76-e97a6095576d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.924719 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-85t7t\" (UniqueName: \"kubernetes.io/projected/993bc795-fde2-4876-9e76-e97a6095576d-kube-api-access-85t7t\") on node \"crc\" DevicePath \"\"" Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.924744 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lw9wf\" (UniqueName: \"kubernetes.io/projected/f1b1fb95-30bd-457e-b59c-c06e6b770f42-kube-api-access-lw9wf\") on node \"crc\" DevicePath \"\"" Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.924983 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a6225f9-cdc5-486a-b813-db81a752fade-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.925001 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q79c4\" (UniqueName: \"kubernetes.io/projected/4a6225f9-cdc5-486a-b813-db81a752fade-kube-api-access-q79c4\") on node \"crc\" DevicePath \"\"" Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.925010 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f1b1fb95-30bd-457e-b59c-c06e6b770f42-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.925023 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/993bc795-fde2-4876-9e76-e97a6095576d-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.928145 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-pq2q8_71135c8c-e27a-4ae0-9e15-afe6164def89/registry-server/0.log" Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.948719 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-pq2q8" Nov 25 11:14:07 crc kubenswrapper[4769]: I1125 11:14:07.970149 4769 scope.go:117] "RemoveContainer" containerID="86e376ec896971639945563443ade776c1d1c20d08a2f850cef284bc5e08fa3b" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.014565 4769 scope.go:117] "RemoveContainer" containerID="169354e3899914d79722f328dea57dfd7c4398163494f6544ce032cba9e0b620" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.029417 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9hv66"] Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.029672 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-9hv66" podUID="7e334767-99a9-4a84-bcd7-e3e53d56750a" containerName="registry-server" containerID="cri-o://fc071f5e0723a066e50b815205af45d4795644cf612a0459af9b2cc2a7ceec74" gracePeriod=2 Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.030929 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r7gh8\" (UniqueName: \"kubernetes.io/projected/e588820f-3563-4b98-9ba4-85da1cee2821-kube-api-access-r7gh8\") pod \"e588820f-3563-4b98-9ba4-85da1cee2821\" (UID: \"e588820f-3563-4b98-9ba4-85da1cee2821\") " Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.031266 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/71135c8c-e27a-4ae0-9e15-afe6164def89-utilities\") pod \"71135c8c-e27a-4ae0-9e15-afe6164def89\" (UID: \"71135c8c-e27a-4ae0-9e15-afe6164def89\") " Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.040005 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e588820f-3563-4b98-9ba4-85da1cee2821-utilities\") pod \"e588820f-3563-4b98-9ba4-85da1cee2821\" (UID: \"e588820f-3563-4b98-9ba4-85da1cee2821\") " Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.040141 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e588820f-3563-4b98-9ba4-85da1cee2821-catalog-content\") pod \"e588820f-3563-4b98-9ba4-85da1cee2821\" (UID: \"e588820f-3563-4b98-9ba4-85da1cee2821\") " Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.033285 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/71135c8c-e27a-4ae0-9e15-afe6164def89-utilities" (OuterVolumeSpecName: "utilities") pod "71135c8c-e27a-4ae0-9e15-afe6164def89" (UID: "71135c8c-e27a-4ae0-9e15-afe6164def89"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.040241 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/71135c8c-e27a-4ae0-9e15-afe6164def89-catalog-content\") pod \"71135c8c-e27a-4ae0-9e15-afe6164def89\" (UID: \"71135c8c-e27a-4ae0-9e15-afe6164def89\") " Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.040275 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nvbtt\" (UniqueName: \"kubernetes.io/projected/71135c8c-e27a-4ae0-9e15-afe6164def89-kube-api-access-nvbtt\") pod \"71135c8c-e27a-4ae0-9e15-afe6164def89\" (UID: \"71135c8c-e27a-4ae0-9e15-afe6164def89\") " Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.041867 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/71135c8c-e27a-4ae0-9e15-afe6164def89-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.043401 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e588820f-3563-4b98-9ba4-85da1cee2821-utilities" (OuterVolumeSpecName: "utilities") pod "e588820f-3563-4b98-9ba4-85da1cee2821" (UID: "e588820f-3563-4b98-9ba4-85da1cee2821"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.061645 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-h57kt" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.077394 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e588820f-3563-4b98-9ba4-85da1cee2821-kube-api-access-r7gh8" (OuterVolumeSpecName: "kube-api-access-r7gh8") pod "e588820f-3563-4b98-9ba4-85da1cee2821" (UID: "e588820f-3563-4b98-9ba4-85da1cee2821"). InnerVolumeSpecName "kube-api-access-r7gh8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.077491 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/71135c8c-e27a-4ae0-9e15-afe6164def89-kube-api-access-nvbtt" (OuterVolumeSpecName: "kube-api-access-nvbtt") pod "71135c8c-e27a-4ae0-9e15-afe6164def89" (UID: "71135c8c-e27a-4ae0-9e15-afe6164def89"). InnerVolumeSpecName "kube-api-access-nvbtt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.089480 4769 scope.go:117] "RemoveContainer" containerID="88daf248b5618ddfa14959abc53a3a1d3536978e0cd35c21a95568586424539e" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.108779 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f1b1fb95-30bd-457e-b59c-c06e6b770f42-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f1b1fb95-30bd-457e-b59c-c06e6b770f42" (UID: "f1b1fb95-30bd-457e-b59c-c06e6b770f42"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.144495 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af5c9899-c379-4d60-9aae-979a20de2ad2-utilities\") pod \"af5c9899-c379-4d60-9aae-979a20de2ad2\" (UID: \"af5c9899-c379-4d60-9aae-979a20de2ad2\") " Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.144541 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z4x9j\" (UniqueName: \"kubernetes.io/projected/af5c9899-c379-4d60-9aae-979a20de2ad2-kube-api-access-z4x9j\") pod \"af5c9899-c379-4d60-9aae-979a20de2ad2\" (UID: \"af5c9899-c379-4d60-9aae-979a20de2ad2\") " Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.144743 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af5c9899-c379-4d60-9aae-979a20de2ad2-catalog-content\") pod \"af5c9899-c379-4d60-9aae-979a20de2ad2\" (UID: \"af5c9899-c379-4d60-9aae-979a20de2ad2\") " Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.145506 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nvbtt\" (UniqueName: \"kubernetes.io/projected/71135c8c-e27a-4ae0-9e15-afe6164def89-kube-api-access-nvbtt\") on node \"crc\" DevicePath \"\"" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.145526 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r7gh8\" (UniqueName: \"kubernetes.io/projected/e588820f-3563-4b98-9ba4-85da1cee2821-kube-api-access-r7gh8\") on node \"crc\" DevicePath \"\"" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.145535 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f1b1fb95-30bd-457e-b59c-c06e6b770f42-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.145544 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e588820f-3563-4b98-9ba4-85da1cee2821-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.152370 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/af5c9899-c379-4d60-9aae-979a20de2ad2-utilities" (OuterVolumeSpecName: "utilities") pod "af5c9899-c379-4d60-9aae-979a20de2ad2" (UID: "af5c9899-c379-4d60-9aae-979a20de2ad2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.163594 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/af5c9899-c379-4d60-9aae-979a20de2ad2-kube-api-access-z4x9j" (OuterVolumeSpecName: "kube-api-access-z4x9j") pod "af5c9899-c379-4d60-9aae-979a20de2ad2" (UID: "af5c9899-c379-4d60-9aae-979a20de2ad2"). InnerVolumeSpecName "kube-api-access-z4x9j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.224527 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/993bc795-fde2-4876-9e76-e97a6095576d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "993bc795-fde2-4876-9e76-e97a6095576d" (UID: "993bc795-fde2-4876-9e76-e97a6095576d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.246723 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4a6225f9-cdc5-486a-b813-db81a752fade-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4a6225f9-cdc5-486a-b813-db81a752fade" (UID: "4a6225f9-cdc5-486a-b813-db81a752fade"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.247709 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a6225f9-cdc5-486a-b813-db81a752fade-catalog-content\") pod \"4a6225f9-cdc5-486a-b813-db81a752fade\" (UID: \"4a6225f9-cdc5-486a-b813-db81a752fade\") " Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.249096 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af5c9899-c379-4d60-9aae-979a20de2ad2-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.249123 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z4x9j\" (UniqueName: \"kubernetes.io/projected/af5c9899-c379-4d60-9aae-979a20de2ad2-kube-api-access-z4x9j\") on node \"crc\" DevicePath \"\"" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.249139 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/993bc795-fde2-4876-9e76-e97a6095576d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 11:14:08 crc kubenswrapper[4769]: W1125 11:14:08.249708 4769 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/4a6225f9-cdc5-486a-b813-db81a752fade/volumes/kubernetes.io~empty-dir/catalog-content Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.249769 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4a6225f9-cdc5-486a-b813-db81a752fade-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4a6225f9-cdc5-486a-b813-db81a752fade" (UID: "4a6225f9-cdc5-486a-b813-db81a752fade"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.253052 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/af5c9899-c379-4d60-9aae-979a20de2ad2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "af5c9899-c379-4d60-9aae-979a20de2ad2" (UID: "af5c9899-c379-4d60-9aae-979a20de2ad2"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.256294 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24" path="/var/lib/kubelet/pods/36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24/volumes" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.266227 4769 scope.go:117] "RemoveContainer" containerID="29fdfb935697aee46562dcc97a060e9c451fb96efb86b1264bca18990f56b411" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.279182 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9v54j"] Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.279460 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-9v54j" podUID="bdd2d26d-cc13-41dd-9e57-fd8e81c627fe" containerName="registry-server" containerID="cri-o://b836d03926facb035157fc561fa40e4d5b0eeaa459d2dc5ec4aa8abf8a9cb149" gracePeriod=2 Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.289693 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/71135c8c-e27a-4ae0-9e15-afe6164def89-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "71135c8c-e27a-4ae0-9e15-afe6164def89" (UID: "71135c8c-e27a-4ae0-9e15-afe6164def89"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.297843 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e588820f-3563-4b98-9ba4-85da1cee2821-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e588820f-3563-4b98-9ba4-85da1cee2821" (UID: "e588820f-3563-4b98-9ba4-85da1cee2821"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.310591 4769 scope.go:117] "RemoveContainer" containerID="9efaa7da0c863469625d1dad8af55684166514f7474256c07dcaa91b94506e08" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.352196 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a6225f9-cdc5-486a-b813-db81a752fade-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.352222 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e588820f-3563-4b98-9ba4-85da1cee2821-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.352232 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/71135c8c-e27a-4ae0-9e15-afe6164def89-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.352243 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af5c9899-c379-4d60-9aae-979a20de2ad2-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.355721 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vr22m_e9d04b78-f610-43ce-bc66-ca181acf6654/registry-server/0.log" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.358935 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-vr22m" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.454946 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rz8zv\" (UniqueName: \"kubernetes.io/projected/e9d04b78-f610-43ce-bc66-ca181acf6654-kube-api-access-rz8zv\") pod \"e9d04b78-f610-43ce-bc66-ca181acf6654\" (UID: \"e9d04b78-f610-43ce-bc66-ca181acf6654\") " Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.455653 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e9d04b78-f610-43ce-bc66-ca181acf6654-catalog-content\") pod \"e9d04b78-f610-43ce-bc66-ca181acf6654\" (UID: \"e9d04b78-f610-43ce-bc66-ca181acf6654\") " Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.456257 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e9d04b78-f610-43ce-bc66-ca181acf6654-utilities\") pod \"e9d04b78-f610-43ce-bc66-ca181acf6654\" (UID: \"e9d04b78-f610-43ce-bc66-ca181acf6654\") " Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.456791 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e9d04b78-f610-43ce-bc66-ca181acf6654-utilities" (OuterVolumeSpecName: "utilities") pod "e9d04b78-f610-43ce-bc66-ca181acf6654" (UID: "e9d04b78-f610-43ce-bc66-ca181acf6654"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.457081 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e9d04b78-f610-43ce-bc66-ca181acf6654-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.459659 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e9d04b78-f610-43ce-bc66-ca181acf6654-kube-api-access-rz8zv" (OuterVolumeSpecName: "kube-api-access-rz8zv") pod "e9d04b78-f610-43ce-bc66-ca181acf6654" (UID: "e9d04b78-f610-43ce-bc66-ca181acf6654"). InnerVolumeSpecName "kube-api-access-rz8zv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.558923 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rz8zv\" (UniqueName: \"kubernetes.io/projected/e9d04b78-f610-43ce-bc66-ca181acf6654-kube-api-access-rz8zv\") on node \"crc\" DevicePath \"\"" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.570346 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-c6g96_b831a4b0-2508-4624-b8cb-833a8d5b10ad/registry-server/0.log" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.571897 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-c6g96" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.603368 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e9d04b78-f610-43ce-bc66-ca181acf6654-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e9d04b78-f610-43ce-bc66-ca181acf6654" (UID: "e9d04b78-f610-43ce-bc66-ca181acf6654"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.661050 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b831a4b0-2508-4624-b8cb-833a8d5b10ad-utilities\") pod \"b831a4b0-2508-4624-b8cb-833a8d5b10ad\" (UID: \"b831a4b0-2508-4624-b8cb-833a8d5b10ad\") " Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.661125 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b831a4b0-2508-4624-b8cb-833a8d5b10ad-catalog-content\") pod \"b831a4b0-2508-4624-b8cb-833a8d5b10ad\" (UID: \"b831a4b0-2508-4624-b8cb-833a8d5b10ad\") " Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.661163 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bjnpc\" (UniqueName: \"kubernetes.io/projected/b831a4b0-2508-4624-b8cb-833a8d5b10ad-kube-api-access-bjnpc\") pod \"b831a4b0-2508-4624-b8cb-833a8d5b10ad\" (UID: \"b831a4b0-2508-4624-b8cb-833a8d5b10ad\") " Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.662042 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e9d04b78-f610-43ce-bc66-ca181acf6654-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.662514 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b831a4b0-2508-4624-b8cb-833a8d5b10ad-utilities" (OuterVolumeSpecName: "utilities") pod "b831a4b0-2508-4624-b8cb-833a8d5b10ad" (UID: "b831a4b0-2508-4624-b8cb-833a8d5b10ad"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.667823 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b831a4b0-2508-4624-b8cb-833a8d5b10ad-kube-api-access-bjnpc" (OuterVolumeSpecName: "kube-api-access-bjnpc") pod "b831a4b0-2508-4624-b8cb-833a8d5b10ad" (UID: "b831a4b0-2508-4624-b8cb-833a8d5b10ad"). InnerVolumeSpecName "kube-api-access-bjnpc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.707000 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-h2wlq" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.721947 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-gkx9w" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.744503 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-vr22m_e9d04b78-f610-43ce-bc66-ca181acf6654/registry-server/0.log" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.745162 4769 generic.go:334] "Generic (PLEG): container finished" podID="e9d04b78-f610-43ce-bc66-ca181acf6654" containerID="831425ff1c6b6beac4d23fee8cd8d66b7eb231b9f1666130855ae0c44e49fea5" exitCode=0 Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.745221 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vr22m" event={"ID":"e9d04b78-f610-43ce-bc66-ca181acf6654","Type":"ContainerDied","Data":"831425ff1c6b6beac4d23fee8cd8d66b7eb231b9f1666130855ae0c44e49fea5"} Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.745254 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vr22m" event={"ID":"e9d04b78-f610-43ce-bc66-ca181acf6654","Type":"ContainerDied","Data":"b5a5e869a98f1eca074fe92b7132c12104eed2851feeeeb5a63de9eac7d150a7"} Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.745273 4769 scope.go:117] "RemoveContainer" containerID="831425ff1c6b6beac4d23fee8cd8d66b7eb231b9f1666130855ae0c44e49fea5" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.745395 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vr22m" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.750633 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-9v54j_bdd2d26d-cc13-41dd-9e57-fd8e81c627fe/registry-server/0.log" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.753517 4769 generic.go:334] "Generic (PLEG): container finished" podID="bdd2d26d-cc13-41dd-9e57-fd8e81c627fe" containerID="b836d03926facb035157fc561fa40e4d5b0eeaa459d2dc5ec4aa8abf8a9cb149" exitCode=0 Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.753590 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9v54j" event={"ID":"bdd2d26d-cc13-41dd-9e57-fd8e81c627fe","Type":"ContainerDied","Data":"b836d03926facb035157fc561fa40e4d5b0eeaa459d2dc5ec4aa8abf8a9cb149"} Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.766293 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b831a4b0-2508-4624-b8cb-833a8d5b10ad-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.766329 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bjnpc\" (UniqueName: \"kubernetes.io/projected/b831a4b0-2508-4624-b8cb-833a8d5b10ad-kube-api-access-bjnpc\") on node \"crc\" DevicePath \"\"" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.767223 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-pq2q8" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.767214 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pq2q8" event={"ID":"71135c8c-e27a-4ae0-9e15-afe6164def89","Type":"ContainerDied","Data":"2eb471835bc11ce22740841fef04b65e26b300b7d80d24a7f43ab1506cd8014f"} Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.782604 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gwvnd" event={"ID":"e588820f-3563-4b98-9ba4-85da1cee2821","Type":"ContainerDied","Data":"0f3924682089918cf0b19123661c56e8e70a89eff21cf81eafbee6600d73ad54"} Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.782819 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gwvnd" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.796404 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-c6g96_b831a4b0-2508-4624-b8cb-833a8d5b10ad/registry-server/0.log" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.797198 4769 generic.go:334] "Generic (PLEG): container finished" podID="b831a4b0-2508-4624-b8cb-833a8d5b10ad" containerID="9ea9ed614dd61aaaf48b170269a23e7f0b42ad623e6fd428f95b28435e92234d" exitCode=0 Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.797337 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c6g96" event={"ID":"b831a4b0-2508-4624-b8cb-833a8d5b10ad","Type":"ContainerDied","Data":"9ea9ed614dd61aaaf48b170269a23e7f0b42ad623e6fd428f95b28435e92234d"} Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.797361 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-c6g96" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.797366 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c6g96" event={"ID":"b831a4b0-2508-4624-b8cb-833a8d5b10ad","Type":"ContainerDied","Data":"6671f69131da140fc91ee06602c7b847c55d8657e59353faffc6e730f058ebc3"} Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.800523 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h57kt" event={"ID":"af5c9899-c379-4d60-9aae-979a20de2ad2","Type":"ContainerDied","Data":"9c104021f2827aae679fc6d341da6863bcbffb01d8d3dac32eb26d335ca836a9"} Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.800601 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-h57kt" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.814956 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-9hv66_7e334767-99a9-4a84-bcd7-e3e53d56750a/registry-server/0.log" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.816718 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-9hv66_7e334767-99a9-4a84-bcd7-e3e53d56750a/registry-server/0.log" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.817914 4769 generic.go:334] "Generic (PLEG): container finished" podID="7e334767-99a9-4a84-bcd7-e3e53d56750a" containerID="fc071f5e0723a066e50b815205af45d4795644cf612a0459af9b2cc2a7ceec74" exitCode=0 Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.817984 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9hv66" event={"ID":"7e334767-99a9-4a84-bcd7-e3e53d56750a","Type":"ContainerDied","Data":"fc071f5e0723a066e50b815205af45d4795644cf612a0459af9b2cc2a7ceec74"} Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.818013 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9hv66" event={"ID":"7e334767-99a9-4a84-bcd7-e3e53d56750a","Type":"ContainerDied","Data":"6ab6771c75c22ab79c6906ff36a11a6d867af22a27e9ab2fce7235739ce00516"} Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.818024 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6ab6771c75c22ab79c6906ff36a11a6d867af22a27e9ab2fce7235739ce00516" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.824281 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9hv66" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.826271 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-856hd" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.851226 4769 scope.go:117] "RemoveContainer" containerID="7ee8f571ffb31393afe705799f437bbd6de94b66c4b25b6c4b3deeb4ceeddd41" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.870777 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xs6l2\" (UniqueName: \"kubernetes.io/projected/7e334767-99a9-4a84-bcd7-e3e53d56750a-kube-api-access-xs6l2\") pod \"7e334767-99a9-4a84-bcd7-e3e53d56750a\" (UID: \"7e334767-99a9-4a84-bcd7-e3e53d56750a\") " Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.870989 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e334767-99a9-4a84-bcd7-e3e53d56750a-catalog-content\") pod \"7e334767-99a9-4a84-bcd7-e3e53d56750a\" (UID: \"7e334767-99a9-4a84-bcd7-e3e53d56750a\") " Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.871163 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e334767-99a9-4a84-bcd7-e3e53d56750a-utilities\") pod \"7e334767-99a9-4a84-bcd7-e3e53d56750a\" (UID: \"7e334767-99a9-4a84-bcd7-e3e53d56750a\") " Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.872111 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7e334767-99a9-4a84-bcd7-e3e53d56750a-utilities" (OuterVolumeSpecName: "utilities") pod "7e334767-99a9-4a84-bcd7-e3e53d56750a" (UID: "7e334767-99a9-4a84-bcd7-e3e53d56750a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.884417 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e334767-99a9-4a84-bcd7-e3e53d56750a-kube-api-access-xs6l2" (OuterVolumeSpecName: "kube-api-access-xs6l2") pod "7e334767-99a9-4a84-bcd7-e3e53d56750a" (UID: "7e334767-99a9-4a84-bcd7-e3e53d56750a"). InnerVolumeSpecName "kube-api-access-xs6l2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.907111 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vr22m"] Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.916426 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b831a4b0-2508-4624-b8cb-833a8d5b10ad-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b831a4b0-2508-4624-b8cb-833a8d5b10ad" (UID: "b831a4b0-2508-4624-b8cb-833a8d5b10ad"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.923271 4769 scope.go:117] "RemoveContainer" containerID="543eaf3d2bfcd44951d40cc61618b80d30d796e12a6f97d497bcc3f393499c9d" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.932477 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-vr22m"] Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.951318 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gkx9w"] Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.960900 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-9v54j_bdd2d26d-cc13-41dd-9e57-fd8e81c627fe/registry-server/0.log" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.962171 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9v54j" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.966307 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-gkx9w"] Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.975678 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e334767-99a9-4a84-bcd7-e3e53d56750a-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.975711 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xs6l2\" (UniqueName: \"kubernetes.io/projected/7e334767-99a9-4a84-bcd7-e3e53d56750a-kube-api-access-xs6l2\") on node \"crc\" DevicePath \"\"" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.975725 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b831a4b0-2508-4624-b8cb-833a8d5b10ad-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 11:14:08 crc kubenswrapper[4769]: I1125 11:14:08.985394 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-pq2q8"] Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.003490 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-pq2q8"] Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.018942 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-h2wlq"] Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.019988 4769 scope.go:117] "RemoveContainer" containerID="96fd247d2f8cb17f2c17c5bdd0fdd8a11720d0e88d1f6adb71fcd0dde0eb9b99" Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.035115 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-h2wlq"] Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.041554 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7e334767-99a9-4a84-bcd7-e3e53d56750a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7e334767-99a9-4a84-bcd7-e3e53d56750a" (UID: "7e334767-99a9-4a84-bcd7-e3e53d56750a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.043361 4769 scope.go:117] "RemoveContainer" containerID="831425ff1c6b6beac4d23fee8cd8d66b7eb231b9f1666130855ae0c44e49fea5" Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.046434 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-h57kt"] Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.058946 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-h57kt"] Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.077118 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bdd2d26d-cc13-41dd-9e57-fd8e81c627fe-catalog-content\") pod \"bdd2d26d-cc13-41dd-9e57-fd8e81c627fe\" (UID: \"bdd2d26d-cc13-41dd-9e57-fd8e81c627fe\") " Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.077368 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bdd2d26d-cc13-41dd-9e57-fd8e81c627fe-utilities\") pod \"bdd2d26d-cc13-41dd-9e57-fd8e81c627fe\" (UID: \"bdd2d26d-cc13-41dd-9e57-fd8e81c627fe\") " Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.077451 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rbbql\" (UniqueName: \"kubernetes.io/projected/bdd2d26d-cc13-41dd-9e57-fd8e81c627fe-kube-api-access-rbbql\") pod \"bdd2d26d-cc13-41dd-9e57-fd8e81c627fe\" (UID: \"bdd2d26d-cc13-41dd-9e57-fd8e81c627fe\") " Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.078109 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e334767-99a9-4a84-bcd7-e3e53d56750a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.082109 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bdd2d26d-cc13-41dd-9e57-fd8e81c627fe-kube-api-access-rbbql" (OuterVolumeSpecName: "kube-api-access-rbbql") pod "bdd2d26d-cc13-41dd-9e57-fd8e81c627fe" (UID: "bdd2d26d-cc13-41dd-9e57-fd8e81c627fe"). InnerVolumeSpecName "kube-api-access-rbbql". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.085664 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-856hd"] Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.102689 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-856hd"] Nov 25 11:14:09 crc kubenswrapper[4769]: E1125 11:14:09.121992 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"831425ff1c6b6beac4d23fee8cd8d66b7eb231b9f1666130855ae0c44e49fea5\": container with ID starting with 831425ff1c6b6beac4d23fee8cd8d66b7eb231b9f1666130855ae0c44e49fea5 not found: ID does not exist" containerID="831425ff1c6b6beac4d23fee8cd8d66b7eb231b9f1666130855ae0c44e49fea5" Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.122054 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"831425ff1c6b6beac4d23fee8cd8d66b7eb231b9f1666130855ae0c44e49fea5"} err="failed to get container status \"831425ff1c6b6beac4d23fee8cd8d66b7eb231b9f1666130855ae0c44e49fea5\": rpc error: code = NotFound desc = could not find container \"831425ff1c6b6beac4d23fee8cd8d66b7eb231b9f1666130855ae0c44e49fea5\": container with ID starting with 831425ff1c6b6beac4d23fee8cd8d66b7eb231b9f1666130855ae0c44e49fea5 not found: ID does not exist" Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.122091 4769 scope.go:117] "RemoveContainer" containerID="7ee8f571ffb31393afe705799f437bbd6de94b66c4b25b6c4b3deeb4ceeddd41" Nov 25 11:14:09 crc kubenswrapper[4769]: E1125 11:14:09.122786 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7ee8f571ffb31393afe705799f437bbd6de94b66c4b25b6c4b3deeb4ceeddd41\": container with ID starting with 7ee8f571ffb31393afe705799f437bbd6de94b66c4b25b6c4b3deeb4ceeddd41 not found: ID does not exist" containerID="7ee8f571ffb31393afe705799f437bbd6de94b66c4b25b6c4b3deeb4ceeddd41" Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.122823 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7ee8f571ffb31393afe705799f437bbd6de94b66c4b25b6c4b3deeb4ceeddd41"} err="failed to get container status \"7ee8f571ffb31393afe705799f437bbd6de94b66c4b25b6c4b3deeb4ceeddd41\": rpc error: code = NotFound desc = could not find container \"7ee8f571ffb31393afe705799f437bbd6de94b66c4b25b6c4b3deeb4ceeddd41\": container with ID starting with 7ee8f571ffb31393afe705799f437bbd6de94b66c4b25b6c4b3deeb4ceeddd41 not found: ID does not exist" Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.122841 4769 scope.go:117] "RemoveContainer" containerID="543eaf3d2bfcd44951d40cc61618b80d30d796e12a6f97d497bcc3f393499c9d" Nov 25 11:14:09 crc kubenswrapper[4769]: E1125 11:14:09.123447 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"543eaf3d2bfcd44951d40cc61618b80d30d796e12a6f97d497bcc3f393499c9d\": container with ID starting with 543eaf3d2bfcd44951d40cc61618b80d30d796e12a6f97d497bcc3f393499c9d not found: ID does not exist" containerID="543eaf3d2bfcd44951d40cc61618b80d30d796e12a6f97d497bcc3f393499c9d" Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.123485 4769 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"543eaf3d2bfcd44951d40cc61618b80d30d796e12a6f97d497bcc3f393499c9d"} err="failed to get container status \"543eaf3d2bfcd44951d40cc61618b80d30d796e12a6f97d497bcc3f393499c9d\": rpc error: code = NotFound desc = could not find container \"543eaf3d2bfcd44951d40cc61618b80d30d796e12a6f97d497bcc3f393499c9d\": container with ID starting with 543eaf3d2bfcd44951d40cc61618b80d30d796e12a6f97d497bcc3f393499c9d not found: ID does not exist" Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.123499 4769 scope.go:117] "RemoveContainer" containerID="96fd247d2f8cb17f2c17c5bdd0fdd8a11720d0e88d1f6adb71fcd0dde0eb9b99" Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.123720 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bdd2d26d-cc13-41dd-9e57-fd8e81c627fe-utilities" (OuterVolumeSpecName: "utilities") pod "bdd2d26d-cc13-41dd-9e57-fd8e81c627fe" (UID: "bdd2d26d-cc13-41dd-9e57-fd8e81c627fe"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:14:09 crc kubenswrapper[4769]: E1125 11:14:09.123812 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"96fd247d2f8cb17f2c17c5bdd0fdd8a11720d0e88d1f6adb71fcd0dde0eb9b99\": container with ID starting with 96fd247d2f8cb17f2c17c5bdd0fdd8a11720d0e88d1f6adb71fcd0dde0eb9b99 not found: ID does not exist" containerID="96fd247d2f8cb17f2c17c5bdd0fdd8a11720d0e88d1f6adb71fcd0dde0eb9b99" Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.123835 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"96fd247d2f8cb17f2c17c5bdd0fdd8a11720d0e88d1f6adb71fcd0dde0eb9b99"} err="failed to get container status \"96fd247d2f8cb17f2c17c5bdd0fdd8a11720d0e88d1f6adb71fcd0dde0eb9b99\": rpc error: code = NotFound desc = could not find container \"96fd247d2f8cb17f2c17c5bdd0fdd8a11720d0e88d1f6adb71fcd0dde0eb9b99\": container with ID starting with 96fd247d2f8cb17f2c17c5bdd0fdd8a11720d0e88d1f6adb71fcd0dde0eb9b99 not found: ID does not exist" Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.123850 4769 scope.go:117] "RemoveContainer" containerID="0f188e2c4c0b837c01d6ec185b9309ce415641bdf1e752b67ec2071fe9681798" Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.141508 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gwvnd"] Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.152121 4769 scope.go:117] "RemoveContainer" containerID="5ee4eb2cabb4bfb20c07a1ac10dc8a00f3e727297791d718cb56b3962af6fdd1" Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.157384 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-gwvnd"] Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.176988 4769 scope.go:117] "RemoveContainer" containerID="bf85cbef29e4aeee6b319ac8803fce5bef0973e197737b718e1b27b03a082476" Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.179727 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rbbql\" (UniqueName: \"kubernetes.io/projected/bdd2d26d-cc13-41dd-9e57-fd8e81c627fe-kube-api-access-rbbql\") on node \"crc\" DevicePath \"\"" Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.179750 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bdd2d26d-cc13-41dd-9e57-fd8e81c627fe-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 
Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.182111 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-c6g96"]
Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.200062 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-c6g96"]
Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.206045 4769 scope.go:117] "RemoveContainer" containerID="7984ec0fe84d0c0fa77453a85eb5526943846d43cc07424faa5e2780e477fba1"
Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.233632 4769 scope.go:117] "RemoveContainer" containerID="a1568be57c7beab61bb0a6b097832669595590c8290f7842f32a3054807d0107"
Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.245271 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bdd2d26d-cc13-41dd-9e57-fd8e81c627fe-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bdd2d26d-cc13-41dd-9e57-fd8e81c627fe" (UID: "bdd2d26d-cc13-41dd-9e57-fd8e81c627fe"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.259561 4769 scope.go:117] "RemoveContainer" containerID="3e8eada057b1d52cc40fd819d5f756a11eaf48ff6396c948a0bcd5bf300e3898"
Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.282024 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bdd2d26d-cc13-41dd-9e57-fd8e81c627fe-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.294351 4769 scope.go:117] "RemoveContainer" containerID="502ff72f42825fcb060ef3a6d40b8f79b30387790f9970563a7867bcba02e4ad"
Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.317057 4769 scope.go:117] "RemoveContainer" containerID="9ea9ed614dd61aaaf48b170269a23e7f0b42ad623e6fd428f95b28435e92234d"
Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.341761 4769 scope.go:117] "RemoveContainer" containerID="eda0ea0f825c571e9753ca1ba01c953cacbc67c2654df4dde4d29e6472a0522c"
Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.458350 4769 scope.go:117] "RemoveContainer" containerID="4cdb4d6b888281d31d627252cabf5488591fdad0dec842d0140089deb3e6dd96"
Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.498560 4769 scope.go:117] "RemoveContainer" containerID="cf737d64efee37e10188ee23890ddb22c0991065a33ac0bc92171731b9229042"
Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.547249 4769 scope.go:117] "RemoveContainer" containerID="9ea9ed614dd61aaaf48b170269a23e7f0b42ad623e6fd428f95b28435e92234d"
Nov 25 11:14:09 crc kubenswrapper[4769]: E1125 11:14:09.547782 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9ea9ed614dd61aaaf48b170269a23e7f0b42ad623e6fd428f95b28435e92234d\": container with ID starting with 9ea9ed614dd61aaaf48b170269a23e7f0b42ad623e6fd428f95b28435e92234d not found: ID does not exist" containerID="9ea9ed614dd61aaaf48b170269a23e7f0b42ad623e6fd428f95b28435e92234d"
Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.547822 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ea9ed614dd61aaaf48b170269a23e7f0b42ad623e6fd428f95b28435e92234d"} err="failed to get container status \"9ea9ed614dd61aaaf48b170269a23e7f0b42ad623e6fd428f95b28435e92234d\": rpc error: code = NotFound desc = could not find container \"9ea9ed614dd61aaaf48b170269a23e7f0b42ad623e6fd428f95b28435e92234d\": container with ID starting with 9ea9ed614dd61aaaf48b170269a23e7f0b42ad623e6fd428f95b28435e92234d not found: ID does not exist"
Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.547847 4769 scope.go:117] "RemoveContainer" containerID="eda0ea0f825c571e9753ca1ba01c953cacbc67c2654df4dde4d29e6472a0522c"
Nov 25 11:14:09 crc kubenswrapper[4769]: E1125 11:14:09.548414 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eda0ea0f825c571e9753ca1ba01c953cacbc67c2654df4dde4d29e6472a0522c\": container with ID starting with eda0ea0f825c571e9753ca1ba01c953cacbc67c2654df4dde4d29e6472a0522c not found: ID does not exist" containerID="eda0ea0f825c571e9753ca1ba01c953cacbc67c2654df4dde4d29e6472a0522c"
Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.548442 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eda0ea0f825c571e9753ca1ba01c953cacbc67c2654df4dde4d29e6472a0522c"} err="failed to get container status \"eda0ea0f825c571e9753ca1ba01c953cacbc67c2654df4dde4d29e6472a0522c\": rpc error: code = NotFound desc = could not find container \"eda0ea0f825c571e9753ca1ba01c953cacbc67c2654df4dde4d29e6472a0522c\": container with ID starting with eda0ea0f825c571e9753ca1ba01c953cacbc67c2654df4dde4d29e6472a0522c not found: ID does not exist"
Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.548465 4769 scope.go:117] "RemoveContainer" containerID="4cdb4d6b888281d31d627252cabf5488591fdad0dec842d0140089deb3e6dd96"
Nov 25 11:14:09 crc kubenswrapper[4769]: E1125 11:14:09.548876 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4cdb4d6b888281d31d627252cabf5488591fdad0dec842d0140089deb3e6dd96\": container with ID starting with 4cdb4d6b888281d31d627252cabf5488591fdad0dec842d0140089deb3e6dd96 not found: ID does not exist" containerID="4cdb4d6b888281d31d627252cabf5488591fdad0dec842d0140089deb3e6dd96"
Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.548900 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4cdb4d6b888281d31d627252cabf5488591fdad0dec842d0140089deb3e6dd96"} err="failed to get container status \"4cdb4d6b888281d31d627252cabf5488591fdad0dec842d0140089deb3e6dd96\": rpc error: code = NotFound desc = could not find container \"4cdb4d6b888281d31d627252cabf5488591fdad0dec842d0140089deb3e6dd96\": container with ID starting with 4cdb4d6b888281d31d627252cabf5488591fdad0dec842d0140089deb3e6dd96 not found: ID does not exist"
Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.548917 4769 scope.go:117] "RemoveContainer" containerID="cf737d64efee37e10188ee23890ddb22c0991065a33ac0bc92171731b9229042"
Nov 25 11:14:09 crc kubenswrapper[4769]: E1125 11:14:09.549194 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cf737d64efee37e10188ee23890ddb22c0991065a33ac0bc92171731b9229042\": container with ID starting with cf737d64efee37e10188ee23890ddb22c0991065a33ac0bc92171731b9229042 not found: ID does not exist" containerID="cf737d64efee37e10188ee23890ddb22c0991065a33ac0bc92171731b9229042"
Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.549217 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cf737d64efee37e10188ee23890ddb22c0991065a33ac0bc92171731b9229042"} err="failed to get container status \"cf737d64efee37e10188ee23890ddb22c0991065a33ac0bc92171731b9229042\": rpc error: code = NotFound desc = could not find container \"cf737d64efee37e10188ee23890ddb22c0991065a33ac0bc92171731b9229042\": container with ID starting with cf737d64efee37e10188ee23890ddb22c0991065a33ac0bc92171731b9229042 not found: ID does not exist"
Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.549233 4769 scope.go:117] "RemoveContainer" containerID="86b51cfdab8c3140e15a460449dbe43cf35a1d4a2038624f852243daba5403fc"
Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.600528 4769 scope.go:117] "RemoveContainer" containerID="633cd28ec7fb44b95489c8af0cfbcffaf11ffc0e9c1961b65114bcbe0841dba6"
Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.633466 4769 scope.go:117] "RemoveContainer" containerID="2a4a84b663d988f8a339e2eb208887afadc2463fdc16121db9034fac9c3b993b"
Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.723905 4769 scope.go:117] "RemoveContainer" containerID="2a261c0d637945ef58e5165df0fc32835e944bf6bd07452f4b65407653066e62"
Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.845424 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9v54j"
Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.845413 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9v54j" event={"ID":"bdd2d26d-cc13-41dd-9e57-fd8e81c627fe","Type":"ContainerDied","Data":"1205df62446f302424f6769a0877b5e5e1601534721e04f29f330d98d3938947"}
Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.845584 4769 scope.go:117] "RemoveContainer" containerID="b836d03926facb035157fc561fa40e4d5b0eeaa459d2dc5ec4aa8abf8a9cb149"
Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.866444 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9hv66"
Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.885111 4769 scope.go:117] "RemoveContainer" containerID="d632444cd9b6b05bdddaf9e5883e0e245e2d64c8856e489fca8a481aa146d254"
Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.923320 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9v54j"]
Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.927017 4769 scope.go:117] "RemoveContainer" containerID="29250a081b2e2ceb2220a152dffa21d16bcb7ae8f7b53d1cc048a0ba38317e17"
Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.939728 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-9v54j"]
Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.966089 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9hv66"]
Nov 25 11:14:09 crc kubenswrapper[4769]: I1125 11:14:09.979409 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-9hv66"]
Nov 25 11:14:10 crc kubenswrapper[4769]: I1125 11:14:10.249890 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4a6225f9-cdc5-486a-b813-db81a752fade" path="/var/lib/kubelet/pods/4a6225f9-cdc5-486a-b813-db81a752fade/volumes"
Nov 25 11:14:10 crc kubenswrapper[4769]: I1125 11:14:10.250739 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="71135c8c-e27a-4ae0-9e15-afe6164def89" path="/var/lib/kubelet/pods/71135c8c-e27a-4ae0-9e15-afe6164def89/volumes"
Nov 25 11:14:10 crc kubenswrapper[4769]: I1125 11:14:10.251448 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7e334767-99a9-4a84-bcd7-e3e53d56750a" path="/var/lib/kubelet/pods/7e334767-99a9-4a84-bcd7-e3e53d56750a/volumes"
Nov 25 11:14:10 crc kubenswrapper[4769]: I1125 11:14:10.252578 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="993bc795-fde2-4876-9e76-e97a6095576d" path="/var/lib/kubelet/pods/993bc795-fde2-4876-9e76-e97a6095576d/volumes"
Nov 25 11:14:10 crc kubenswrapper[4769]: I1125 11:14:10.253269 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="af5c9899-c379-4d60-9aae-979a20de2ad2" path="/var/lib/kubelet/pods/af5c9899-c379-4d60-9aae-979a20de2ad2/volumes"
Nov 25 11:14:10 crc kubenswrapper[4769]: I1125 11:14:10.260119 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b831a4b0-2508-4624-b8cb-833a8d5b10ad" path="/var/lib/kubelet/pods/b831a4b0-2508-4624-b8cb-833a8d5b10ad/volumes"
Nov 25 11:14:10 crc kubenswrapper[4769]: I1125 11:14:10.260820 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bdd2d26d-cc13-41dd-9e57-fd8e81c627fe" path="/var/lib/kubelet/pods/bdd2d26d-cc13-41dd-9e57-fd8e81c627fe/volumes"
Nov 25 11:14:10 crc kubenswrapper[4769]: I1125 11:14:10.262491 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e588820f-3563-4b98-9ba4-85da1cee2821" path="/var/lib/kubelet/pods/e588820f-3563-4b98-9ba4-85da1cee2821/volumes"
Nov 25 11:14:10 crc kubenswrapper[4769]: I1125 11:14:10.263184 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e9d04b78-f610-43ce-bc66-ca181acf6654" path="/var/lib/kubelet/pods/e9d04b78-f610-43ce-bc66-ca181acf6654/volumes"
Nov 25 11:14:10 crc kubenswrapper[4769]: I1125 11:14:10.263862 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f1b1fb95-30bd-457e-b59c-c06e6b770f42" path="/var/lib/kubelet/pods/f1b1fb95-30bd-457e-b59c-c06e6b770f42/volumes"
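[Annotation] The run of kubelet_volumes.go:163 messages above is the kubelet's orphaned-pod sweep: once a pod is gone from the API and all of its volumes are torn down, the per-UID directory under /var/lib/kubelet/pods is removed. A rough sketch of how such directories could be enumerated on a node (standard kubelet path layout; the emptiness check is a simplification of the kubelet's real safety checks):

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    func main() {
        // Standard kubelet layout: one directory per pod UID.
        pods, err := filepath.Glob("/var/lib/kubelet/pods/*")
        if err != nil {
            panic(err)
        }
        for _, p := range pods {
            // A pod dir whose volumes/ subtree is empty is a cleanup
            // candidate, mirroring the "Cleaned up orphaned pod volumes
            // dir" messages above.
            entries, err := os.ReadDir(filepath.Join(p, "volumes"))
            if err == nil && len(entries) == 0 {
                fmt.Println("orphan candidate:", filepath.Base(p))
            }
        }
    }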
path="/var/lib/kubelet/pods/f1b1fb95-30bd-457e-b59c-c06e6b770f42/volumes" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.457359 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-pd2j4/must-gather-5nnmn"] Nov 25 11:14:19 crc kubenswrapper[4769]: E1125 11:14:19.462922 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b831a4b0-2508-4624-b8cb-833a8d5b10ad" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.463561 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="b831a4b0-2508-4624-b8cb-833a8d5b10ad" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: E1125 11:14:19.463606 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1b1fb95-30bd-457e-b59c-c06e6b770f42" containerName="extract-content" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.463614 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1b1fb95-30bd-457e-b59c-c06e6b770f42" containerName="extract-content" Nov 25 11:14:19 crc kubenswrapper[4769]: E1125 11:14:19.463627 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1b1fb95-30bd-457e-b59c-c06e6b770f42" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.463633 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1b1fb95-30bd-457e-b59c-c06e6b770f42" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: E1125 11:14:19.463643 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af5c9899-c379-4d60-9aae-979a20de2ad2" containerName="extract-content" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.463649 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="af5c9899-c379-4d60-9aae-979a20de2ad2" containerName="extract-content" Nov 25 11:14:19 crc kubenswrapper[4769]: E1125 11:14:19.463657 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e588820f-3563-4b98-9ba4-85da1cee2821" containerName="extract-content" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.463663 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="e588820f-3563-4b98-9ba4-85da1cee2821" containerName="extract-content" Nov 25 11:14:19 crc kubenswrapper[4769]: E1125 11:14:19.463680 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="71135c8c-e27a-4ae0-9e15-afe6164def89" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.463685 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="71135c8c-e27a-4ae0-9e15-afe6164def89" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: E1125 11:14:19.463693 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b831a4b0-2508-4624-b8cb-833a8d5b10ad" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.463698 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="b831a4b0-2508-4624-b8cb-833a8d5b10ad" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: E1125 11:14:19.463709 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e334767-99a9-4a84-bcd7-e3e53d56750a" containerName="extract-content" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.463716 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e334767-99a9-4a84-bcd7-e3e53d56750a" containerName="extract-content" Nov 25 11:14:19 crc kubenswrapper[4769]: E1125 11:14:19.463729 4769 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="993bc795-fde2-4876-9e76-e97a6095576d" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.463737 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="993bc795-fde2-4876-9e76-e97a6095576d" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: E1125 11:14:19.463747 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bdd2d26d-cc13-41dd-9e57-fd8e81c627fe" containerName="extract-utilities" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.463754 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="bdd2d26d-cc13-41dd-9e57-fd8e81c627fe" containerName="extract-utilities" Nov 25 11:14:19 crc kubenswrapper[4769]: E1125 11:14:19.463766 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a6225f9-cdc5-486a-b813-db81a752fade" containerName="extract-utilities" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.463773 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a6225f9-cdc5-486a-b813-db81a752fade" containerName="extract-utilities" Nov 25 11:14:19 crc kubenswrapper[4769]: E1125 11:14:19.463789 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9d04b78-f610-43ce-bc66-ca181acf6654" containerName="extract-content" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.463799 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9d04b78-f610-43ce-bc66-ca181acf6654" containerName="extract-content" Nov 25 11:14:19 crc kubenswrapper[4769]: E1125 11:14:19.463811 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="71135c8c-e27a-4ae0-9e15-afe6164def89" containerName="extract-utilities" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.463818 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="71135c8c-e27a-4ae0-9e15-afe6164def89" containerName="extract-utilities" Nov 25 11:14:19 crc kubenswrapper[4769]: E1125 11:14:19.463838 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a6225f9-cdc5-486a-b813-db81a752fade" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.463848 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a6225f9-cdc5-486a-b813-db81a752fade" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: E1125 11:14:19.463857 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e334767-99a9-4a84-bcd7-e3e53d56750a" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.463863 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e334767-99a9-4a84-bcd7-e3e53d56750a" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: E1125 11:14:19.463875 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24" containerName="extract-content" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.463885 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24" containerName="extract-content" Nov 25 11:14:19 crc kubenswrapper[4769]: E1125 11:14:19.463898 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9d04b78-f610-43ce-bc66-ca181acf6654" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.463903 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9d04b78-f610-43ce-bc66-ca181acf6654" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: E1125 11:14:19.463914 4769 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="bdd2d26d-cc13-41dd-9e57-fd8e81c627fe" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.463919 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="bdd2d26d-cc13-41dd-9e57-fd8e81c627fe" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: E1125 11:14:19.463927 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24" containerName="extract-utilities" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.463932 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24" containerName="extract-utilities" Nov 25 11:14:19 crc kubenswrapper[4769]: E1125 11:14:19.463952 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bdd2d26d-cc13-41dd-9e57-fd8e81c627fe" containerName="extract-content" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.463974 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="bdd2d26d-cc13-41dd-9e57-fd8e81c627fe" containerName="extract-content" Nov 25 11:14:19 crc kubenswrapper[4769]: E1125 11:14:19.463987 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a6225f9-cdc5-486a-b813-db81a752fade" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.463994 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a6225f9-cdc5-486a-b813-db81a752fade" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: E1125 11:14:19.464011 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b831a4b0-2508-4624-b8cb-833a8d5b10ad" containerName="extract-utilities" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.464017 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="b831a4b0-2508-4624-b8cb-833a8d5b10ad" containerName="extract-utilities" Nov 25 11:14:19 crc kubenswrapper[4769]: E1125 11:14:19.464027 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bdd2d26d-cc13-41dd-9e57-fd8e81c627fe" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.464032 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="bdd2d26d-cc13-41dd-9e57-fd8e81c627fe" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: E1125 11:14:19.464041 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.464047 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: E1125 11:14:19.464055 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="993bc795-fde2-4876-9e76-e97a6095576d" containerName="extract-content" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.464063 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="993bc795-fde2-4876-9e76-e97a6095576d" containerName="extract-content" Nov 25 11:14:19 crc kubenswrapper[4769]: E1125 11:14:19.464075 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1b1fb95-30bd-457e-b59c-c06e6b770f42" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.464096 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1b1fb95-30bd-457e-b59c-c06e6b770f42" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: E1125 
11:14:19.464121 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9d04b78-f610-43ce-bc66-ca181acf6654" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.464128 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9d04b78-f610-43ce-bc66-ca181acf6654" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: E1125 11:14:19.464143 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="993bc795-fde2-4876-9e76-e97a6095576d" containerName="extract-utilities" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.464151 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="993bc795-fde2-4876-9e76-e97a6095576d" containerName="extract-utilities" Nov 25 11:14:19 crc kubenswrapper[4769]: E1125 11:14:19.464169 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e588820f-3563-4b98-9ba4-85da1cee2821" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.464177 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="e588820f-3563-4b98-9ba4-85da1cee2821" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: E1125 11:14:19.464187 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af5c9899-c379-4d60-9aae-979a20de2ad2" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.464195 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="af5c9899-c379-4d60-9aae-979a20de2ad2" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: E1125 11:14:19.464209 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e334767-99a9-4a84-bcd7-e3e53d56750a" containerName="extract-utilities" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.464216 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e334767-99a9-4a84-bcd7-e3e53d56750a" containerName="extract-utilities" Nov 25 11:14:19 crc kubenswrapper[4769]: E1125 11:14:19.464250 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af5c9899-c379-4d60-9aae-979a20de2ad2" containerName="extract-utilities" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.464258 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="af5c9899-c379-4d60-9aae-979a20de2ad2" containerName="extract-utilities" Nov 25 11:14:19 crc kubenswrapper[4769]: E1125 11:14:19.464270 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e588820f-3563-4b98-9ba4-85da1cee2821" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.464276 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="e588820f-3563-4b98-9ba4-85da1cee2821" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: E1125 11:14:19.464288 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e588820f-3563-4b98-9ba4-85da1cee2821" containerName="extract-utilities" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.464293 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="e588820f-3563-4b98-9ba4-85da1cee2821" containerName="extract-utilities" Nov 25 11:14:19 crc kubenswrapper[4769]: E1125 11:14:19.464304 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9d04b78-f610-43ce-bc66-ca181acf6654" containerName="extract-utilities" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.464310 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9d04b78-f610-43ce-bc66-ca181acf6654" containerName="extract-utilities" Nov 25 
11:14:19 crc kubenswrapper[4769]: E1125 11:14:19.464316 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="71135c8c-e27a-4ae0-9e15-afe6164def89" containerName="extract-content" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.464321 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="71135c8c-e27a-4ae0-9e15-afe6164def89" containerName="extract-content" Nov 25 11:14:19 crc kubenswrapper[4769]: E1125 11:14:19.464334 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1b1fb95-30bd-457e-b59c-c06e6b770f42" containerName="extract-utilities" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.464339 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1b1fb95-30bd-457e-b59c-c06e6b770f42" containerName="extract-utilities" Nov 25 11:14:19 crc kubenswrapper[4769]: E1125 11:14:19.464348 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a6225f9-cdc5-486a-b813-db81a752fade" containerName="extract-content" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.464355 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a6225f9-cdc5-486a-b813-db81a752fade" containerName="extract-content" Nov 25 11:14:19 crc kubenswrapper[4769]: E1125 11:14:19.464363 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af5c9899-c379-4d60-9aae-979a20de2ad2" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.464380 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="af5c9899-c379-4d60-9aae-979a20de2ad2" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: E1125 11:14:19.464389 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="993bc795-fde2-4876-9e76-e97a6095576d" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.464395 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="993bc795-fde2-4876-9e76-e97a6095576d" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: E1125 11:14:19.464406 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.464413 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: E1125 11:14:19.464420 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b831a4b0-2508-4624-b8cb-833a8d5b10ad" containerName="extract-content" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.464426 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="b831a4b0-2508-4624-b8cb-833a8d5b10ad" containerName="extract-content" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.464690 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="f1b1fb95-30bd-457e-b59c-c06e6b770f42" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.464722 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="71135c8c-e27a-4ae0-9e15-afe6164def89" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.464735 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="b831a4b0-2508-4624-b8cb-833a8d5b10ad" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.464775 4769 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.464785 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9d04b78-f610-43ce-bc66-ca181acf6654" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.464804 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e334767-99a9-4a84-bcd7-e3e53d56750a" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.464824 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="e588820f-3563-4b98-9ba4-85da1cee2821" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.464843 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="bdd2d26d-cc13-41dd-9e57-fd8e81c627fe" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.464853 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="e588820f-3563-4b98-9ba4-85da1cee2821" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.464876 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="36eb2a8b-ac79-4440-a6e8-ebe9db9dfa24" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.464891 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="bdd2d26d-cc13-41dd-9e57-fd8e81c627fe" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.464916 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="af5c9899-c379-4d60-9aae-979a20de2ad2" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.464925 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="993bc795-fde2-4876-9e76-e97a6095576d" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.464940 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="4a6225f9-cdc5-486a-b813-db81a752fade" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.464948 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e334767-99a9-4a84-bcd7-e3e53d56750a" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.464988 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="4a6225f9-cdc5-486a-b813-db81a752fade" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.465006 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="af5c9899-c379-4d60-9aae-979a20de2ad2" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: E1125 11:14:19.465425 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="71135c8c-e27a-4ae0-9e15-afe6164def89" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.465432 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="71135c8c-e27a-4ae0-9e15-afe6164def89" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: E1125 11:14:19.465450 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e334767-99a9-4a84-bcd7-e3e53d56750a" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.465458 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e334767-99a9-4a84-bcd7-e3e53d56750a" containerName="registry-server" Nov 25 11:14:19 crc kubenswrapper[4769]: 
Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.465806 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="f1b1fb95-30bd-457e-b59c-c06e6b770f42" containerName="registry-server"
Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.465816 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="71135c8c-e27a-4ae0-9e15-afe6164def89" containerName="registry-server"
Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.465829 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="b831a4b0-2508-4624-b8cb-833a8d5b10ad" containerName="registry-server"
Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.465855 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9d04b78-f610-43ce-bc66-ca181acf6654" containerName="registry-server"
Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.483864 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pd2j4/must-gather-5nnmn"
Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.517185 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-pd2j4"/"openshift-service-ca.crt"
Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.517870 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-pd2j4"/"kube-root-ca.crt"
Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.563489 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nvn7p\" (UniqueName: \"kubernetes.io/projected/947890df-6173-4295-985a-8039cc77cca4-kube-api-access-nvn7p\") pod \"must-gather-5nnmn\" (UID: \"947890df-6173-4295-985a-8039cc77cca4\") " pod="openshift-must-gather-pd2j4/must-gather-5nnmn"
Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.563597 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/947890df-6173-4295-985a-8039cc77cca4-must-gather-output\") pod \"must-gather-5nnmn\" (UID: \"947890df-6173-4295-985a-8039cc77cca4\") " pod="openshift-must-gather-pd2j4/must-gather-5nnmn"
Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.665632 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nvn7p\" (UniqueName: \"kubernetes.io/projected/947890df-6173-4295-985a-8039cc77cca4-kube-api-access-nvn7p\") pod \"must-gather-5nnmn\" (UID: \"947890df-6173-4295-985a-8039cc77cca4\") " pod="openshift-must-gather-pd2j4/must-gather-5nnmn"
Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.665763 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/947890df-6173-4295-985a-8039cc77cca4-must-gather-output\") pod \"must-gather-5nnmn\" (UID: \"947890df-6173-4295-985a-8039cc77cca4\") " pod="openshift-must-gather-pd2j4/must-gather-5nnmn"
Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.666132 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/947890df-6173-4295-985a-8039cc77cca4-must-gather-output\") pod \"must-gather-5nnmn\" (UID: \"947890df-6173-4295-985a-8039cc77cca4\") " pod="openshift-must-gather-pd2j4/must-gather-5nnmn"
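[Annotation] Every volume in this log walks the same three steps visible above: the reconciler verifies the volume is attached (reconciler_common.go:245), starts the mount (reconciler_common.go:218), and the operation executor reports MountVolume.SetUp succeeded (operation_generator.go:637). A compact rendering of that progression, with volume names taken from the log; the three-stage state machine is a deliberate simplification of the desired-state/actual-state reconciler:

    package main

    import "fmt"

    func main() {
        // The three stages each volume passes through here, in order.
        stages := []string{
            "operationExecutor.VerifyControllerAttachedVolume started",
            "operationExecutor.MountVolume started",
            "MountVolume.SetUp succeeded",
        }
        for _, v := range []string{"kube-api-access-nvn7p", "must-gather-output"} {
            for _, s := range stages {
                fmt.Printf("%s for volume %q\n", s, v)
            }
        }
    }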
Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.685201 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nvn7p\" (UniqueName: \"kubernetes.io/projected/947890df-6173-4295-985a-8039cc77cca4-kube-api-access-nvn7p\") pod \"must-gather-5nnmn\" (UID: \"947890df-6173-4295-985a-8039cc77cca4\") " pod="openshift-must-gather-pd2j4/must-gather-5nnmn"
Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.770035 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-pd2j4/must-gather-5nnmn"]
Nov 25 11:14:19 crc kubenswrapper[4769]: I1125 11:14:19.815765 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pd2j4/must-gather-5nnmn"
Nov 25 11:14:20 crc kubenswrapper[4769]: I1125 11:14:20.236832 4769 scope.go:117] "RemoveContainer" containerID="4b939fc00d006c5c582da7cfcaed5ec1363b4453901fa7b7866cf6b4ff5b14f5"
Nov 25 11:14:20 crc kubenswrapper[4769]: E1125 11:14:20.237384 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256"
Nov 25 11:14:20 crc kubenswrapper[4769]: I1125 11:14:20.503936 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-pd2j4/must-gather-5nnmn"]
Nov 25 11:14:21 crc kubenswrapper[4769]: I1125 11:14:21.041896 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pd2j4/must-gather-5nnmn" event={"ID":"947890df-6173-4295-985a-8039cc77cca4","Type":"ContainerStarted","Data":"3b800c448e5c37349c96627bddda011cec84aa6fa0e570446c1544e1e3a07db4"}
Nov 25 11:14:27 crc kubenswrapper[4769]: I1125 11:14:27.118352 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pd2j4/must-gather-5nnmn" event={"ID":"947890df-6173-4295-985a-8039cc77cca4","Type":"ContainerStarted","Data":"b92d50d366e551d74d95c4369d526dd50766ca69b7dc3430c23092d1c50dfef0"}
Nov 25 11:14:27 crc kubenswrapper[4769]: I1125 11:14:27.119016 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pd2j4/must-gather-5nnmn" event={"ID":"947890df-6173-4295-985a-8039cc77cca4","Type":"ContainerStarted","Data":"1828efa278c8a5888cff7203f3536fd09d17575a78b495ad34407a53c659aaff"}
Nov 25 11:14:27 crc kubenswrapper[4769]: I1125 11:14:27.137314 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-pd2j4/must-gather-5nnmn" podStartSLOduration=2.677288372 podStartE2EDuration="8.137283248s" podCreationTimestamp="2025-11-25 11:14:19 +0000 UTC" firstStartedPulling="2025-11-25 11:14:20.512186747 +0000 UTC m=+5409.097159060" lastFinishedPulling="2025-11-25 11:14:25.972181613 +0000 UTC m=+5414.557153936" observedRunningTime="2025-11-25 11:14:27.134446012 +0000 UTC m=+5415.719418345" watchObservedRunningTime="2025-11-25 11:14:27.137283248 +0000 UTC m=+5415.722255571"
Nov 25 11:14:32 crc kubenswrapper[4769]: I1125 11:14:32.249879 4769 scope.go:117] "RemoveContainer" containerID="4b939fc00d006c5c582da7cfcaed5ec1363b4453901fa7b7866cf6b4ff5b14f5"
Nov 25 11:14:33 crc kubenswrapper[4769]: I1125 11:14:33.201329 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" event={"ID":"d58c71b5-5dc4-45c1-9b58-9740a35d2256","Type":"ContainerStarted","Data":"774bc57830d3ea61329fafb65ae2f3776de79f07090f20a76a66522b46be9570"}
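[Annotation] Two things above are worth decoding. The CrashLoopBackOff record for machine-config-daemon-98mzt shows the restart backoff at its default 5m0s cap, which matches the successful restart twelve seconds later once the backoff window expired. And the "Observed pod startup duration" record encodes a useful identity: podStartSLOduration is podStartE2EDuration minus the image-pull window, computed on the monotonic clock (the m=+ offsets). For must-gather-5nnmn: 8.137283248s end-to-end, minus 5414.557153936 - 5409.097159060 = 5.459994876s of pulling, leaves exactly the reported 2.677288372. Checked in Go, using only values from the record above:

    package main

    import "fmt"

    func main() {
        // Monotonic offsets from the m=+ fields in the log record.
        firstPull := 5409.097159060
        lastPull := 5414.557153936
        e2e := 8.137283248 // watchObservedRunningTime - podCreationTimestamp
        pull := lastPull - firstPull
        fmt.Printf("pull=%.9fs slo=%.9fs\n", pull, e2e-pull)
        // Output: pull=5.459994876s slo=2.677288372s
    }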
event={"ID":"d58c71b5-5dc4-45c1-9b58-9740a35d2256","Type":"ContainerStarted","Data":"774bc57830d3ea61329fafb65ae2f3776de79f07090f20a76a66522b46be9570"} Nov 25 11:14:35 crc kubenswrapper[4769]: I1125 11:14:35.012585 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-pd2j4/crc-debug-x4jfv"] Nov 25 11:14:35 crc kubenswrapper[4769]: I1125 11:14:35.014622 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pd2j4/crc-debug-x4jfv" Nov 25 11:14:35 crc kubenswrapper[4769]: I1125 11:14:35.016886 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-pd2j4"/"default-dockercfg-hl7sn" Nov 25 11:14:35 crc kubenswrapper[4769]: I1125 11:14:35.106042 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6c5b029e-d497-4a8c-b2ee-4f42074aa74b-host\") pod \"crc-debug-x4jfv\" (UID: \"6c5b029e-d497-4a8c-b2ee-4f42074aa74b\") " pod="openshift-must-gather-pd2j4/crc-debug-x4jfv" Nov 25 11:14:35 crc kubenswrapper[4769]: I1125 11:14:35.106193 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9c2ss\" (UniqueName: \"kubernetes.io/projected/6c5b029e-d497-4a8c-b2ee-4f42074aa74b-kube-api-access-9c2ss\") pod \"crc-debug-x4jfv\" (UID: \"6c5b029e-d497-4a8c-b2ee-4f42074aa74b\") " pod="openshift-must-gather-pd2j4/crc-debug-x4jfv" Nov 25 11:14:35 crc kubenswrapper[4769]: I1125 11:14:35.208838 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6c5b029e-d497-4a8c-b2ee-4f42074aa74b-host\") pod \"crc-debug-x4jfv\" (UID: \"6c5b029e-d497-4a8c-b2ee-4f42074aa74b\") " pod="openshift-must-gather-pd2j4/crc-debug-x4jfv" Nov 25 11:14:35 crc kubenswrapper[4769]: I1125 11:14:35.209012 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9c2ss\" (UniqueName: \"kubernetes.io/projected/6c5b029e-d497-4a8c-b2ee-4f42074aa74b-kube-api-access-9c2ss\") pod \"crc-debug-x4jfv\" (UID: \"6c5b029e-d497-4a8c-b2ee-4f42074aa74b\") " pod="openshift-must-gather-pd2j4/crc-debug-x4jfv" Nov 25 11:14:35 crc kubenswrapper[4769]: I1125 11:14:35.209976 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6c5b029e-d497-4a8c-b2ee-4f42074aa74b-host\") pod \"crc-debug-x4jfv\" (UID: \"6c5b029e-d497-4a8c-b2ee-4f42074aa74b\") " pod="openshift-must-gather-pd2j4/crc-debug-x4jfv" Nov 25 11:14:35 crc kubenswrapper[4769]: I1125 11:14:35.232778 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9c2ss\" (UniqueName: \"kubernetes.io/projected/6c5b029e-d497-4a8c-b2ee-4f42074aa74b-kube-api-access-9c2ss\") pod \"crc-debug-x4jfv\" (UID: \"6c5b029e-d497-4a8c-b2ee-4f42074aa74b\") " pod="openshift-must-gather-pd2j4/crc-debug-x4jfv" Nov 25 11:14:35 crc kubenswrapper[4769]: I1125 11:14:35.338641 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-pd2j4/crc-debug-x4jfv" Nov 25 11:14:36 crc kubenswrapper[4769]: I1125 11:14:36.254233 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pd2j4/crc-debug-x4jfv" event={"ID":"6c5b029e-d497-4a8c-b2ee-4f42074aa74b","Type":"ContainerStarted","Data":"197d85c60cdfc71943f640ac25018afef929cb756eedda125be0aebbef056532"} Nov 25 11:14:49 crc kubenswrapper[4769]: I1125 11:14:49.402144 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pd2j4/crc-debug-x4jfv" event={"ID":"6c5b029e-d497-4a8c-b2ee-4f42074aa74b","Type":"ContainerStarted","Data":"dea5af91b1133d945c8eb44a81e8869d8b26897882c39093eb5d7c561e5b486d"} Nov 25 11:14:49 crc kubenswrapper[4769]: I1125 11:14:49.421921 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-pd2j4/crc-debug-x4jfv" podStartSLOduration=2.18238827 podStartE2EDuration="15.421900348s" podCreationTimestamp="2025-11-25 11:14:34 +0000 UTC" firstStartedPulling="2025-11-25 11:14:35.400357082 +0000 UTC m=+5423.985329395" lastFinishedPulling="2025-11-25 11:14:48.63986916 +0000 UTC m=+5437.224841473" observedRunningTime="2025-11-25 11:14:49.419511495 +0000 UTC m=+5438.004483818" watchObservedRunningTime="2025-11-25 11:14:49.421900348 +0000 UTC m=+5438.006872671" Nov 25 11:14:52 crc kubenswrapper[4769]: I1125 11:14:52.092952 4769 trace.go:236] Trace[1817154164]: "Calculate volume metrics of catalog-content for pod openshift-marketplace/redhat-operators-426q9" (25-Nov-2025 11:14:51.066) (total time: 1026ms): Nov 25 11:14:52 crc kubenswrapper[4769]: Trace[1817154164]: [1.026162035s] [1.026162035s] END Nov 25 11:15:00 crc kubenswrapper[4769]: I1125 11:15:00.266384 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401155-vtkbl"] Nov 25 11:15:00 crc kubenswrapper[4769]: I1125 11:15:00.269546 4769 util.go:30] "No sandbox for pod can be found. 
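[Annotation] Trace[1817154164] above is kubelet's latency tracing (the k8s.io/utils/trace package): a trace wraps the volume-metrics calculation and is only emitted because its total time, 1.026s, crossed the logging threshold. The exact threshold used at trace.go:236 is not visible in the log; one second is assumed in this minimal usage sketch:

    package main

    import (
        "time"

        utiltrace "k8s.io/utils/trace"
    )

    func main() {
        // Nothing is logged unless total time exceeds the threshold,
        // which is why only the slow (>1s) calculation shows up above.
        t := utiltrace.New("Calculate volume metrics of catalog-content")
        defer t.LogIfLong(time.Second)          // threshold is an assumption
        time.Sleep(1100 * time.Millisecond)     // stand-in for the du/statfs work
    }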
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-vtkbl" Nov 25 11:15:00 crc kubenswrapper[4769]: I1125 11:15:00.273383 4769 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 11:15:00 crc kubenswrapper[4769]: I1125 11:15:00.273569 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 11:15:00 crc kubenswrapper[4769]: I1125 11:15:00.303521 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401155-vtkbl"] Nov 25 11:15:00 crc kubenswrapper[4769]: I1125 11:15:00.344438 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/91a9ef47-2886-42de-877d-63a0f59e0248-config-volume\") pod \"collect-profiles-29401155-vtkbl\" (UID: \"91a9ef47-2886-42de-877d-63a0f59e0248\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-vtkbl" Nov 25 11:15:00 crc kubenswrapper[4769]: I1125 11:15:00.344723 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/91a9ef47-2886-42de-877d-63a0f59e0248-secret-volume\") pod \"collect-profiles-29401155-vtkbl\" (UID: \"91a9ef47-2886-42de-877d-63a0f59e0248\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-vtkbl" Nov 25 11:15:00 crc kubenswrapper[4769]: I1125 11:15:00.344807 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wwt28\" (UniqueName: \"kubernetes.io/projected/91a9ef47-2886-42de-877d-63a0f59e0248-kube-api-access-wwt28\") pod \"collect-profiles-29401155-vtkbl\" (UID: \"91a9ef47-2886-42de-877d-63a0f59e0248\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-vtkbl" Nov 25 11:15:00 crc kubenswrapper[4769]: I1125 11:15:00.447390 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/91a9ef47-2886-42de-877d-63a0f59e0248-config-volume\") pod \"collect-profiles-29401155-vtkbl\" (UID: \"91a9ef47-2886-42de-877d-63a0f59e0248\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-vtkbl" Nov 25 11:15:00 crc kubenswrapper[4769]: I1125 11:15:00.447457 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/91a9ef47-2886-42de-877d-63a0f59e0248-secret-volume\") pod \"collect-profiles-29401155-vtkbl\" (UID: \"91a9ef47-2886-42de-877d-63a0f59e0248\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-vtkbl" Nov 25 11:15:00 crc kubenswrapper[4769]: I1125 11:15:00.447490 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wwt28\" (UniqueName: \"kubernetes.io/projected/91a9ef47-2886-42de-877d-63a0f59e0248-kube-api-access-wwt28\") pod \"collect-profiles-29401155-vtkbl\" (UID: \"91a9ef47-2886-42de-877d-63a0f59e0248\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-vtkbl" Nov 25 11:15:00 crc kubenswrapper[4769]: I1125 11:15:00.449028 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/91a9ef47-2886-42de-877d-63a0f59e0248-config-volume\") pod 
\"collect-profiles-29401155-vtkbl\" (UID: \"91a9ef47-2886-42de-877d-63a0f59e0248\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-vtkbl" Nov 25 11:15:00 crc kubenswrapper[4769]: I1125 11:15:00.457914 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/91a9ef47-2886-42de-877d-63a0f59e0248-secret-volume\") pod \"collect-profiles-29401155-vtkbl\" (UID: \"91a9ef47-2886-42de-877d-63a0f59e0248\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-vtkbl" Nov 25 11:15:00 crc kubenswrapper[4769]: I1125 11:15:00.468649 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wwt28\" (UniqueName: \"kubernetes.io/projected/91a9ef47-2886-42de-877d-63a0f59e0248-kube-api-access-wwt28\") pod \"collect-profiles-29401155-vtkbl\" (UID: \"91a9ef47-2886-42de-877d-63a0f59e0248\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-vtkbl" Nov 25 11:15:00 crc kubenswrapper[4769]: I1125 11:15:00.603073 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-vtkbl" Nov 25 11:15:04 crc kubenswrapper[4769]: I1125 11:15:04.206513 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401155-vtkbl"] Nov 25 11:15:04 crc kubenswrapper[4769]: I1125 11:15:04.645178 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-vtkbl" event={"ID":"91a9ef47-2886-42de-877d-63a0f59e0248","Type":"ContainerStarted","Data":"f3e3830135b4d7d57e7c8dfd469aff783565a2d1174343cb396a53ce1795b2ec"} Nov 25 11:15:04 crc kubenswrapper[4769]: I1125 11:15:04.645687 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-vtkbl" event={"ID":"91a9ef47-2886-42de-877d-63a0f59e0248","Type":"ContainerStarted","Data":"dacaeb821adfb0486ea05c03832cb3306e451d88904d0ae205c0f20461755672"} Nov 25 11:15:04 crc kubenswrapper[4769]: I1125 11:15:04.668307 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-vtkbl" podStartSLOduration=4.668282442 podStartE2EDuration="4.668282442s" podCreationTimestamp="2025-11-25 11:15:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 11:15:04.657891227 +0000 UTC m=+5453.242863540" watchObservedRunningTime="2025-11-25 11:15:04.668282442 +0000 UTC m=+5453.253254755" Nov 25 11:15:05 crc kubenswrapper[4769]: I1125 11:15:05.657389 4769 generic.go:334] "Generic (PLEG): container finished" podID="91a9ef47-2886-42de-877d-63a0f59e0248" containerID="f3e3830135b4d7d57e7c8dfd469aff783565a2d1174343cb396a53ce1795b2ec" exitCode=0 Nov 25 11:15:05 crc kubenswrapper[4769]: I1125 11:15:05.657452 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-vtkbl" event={"ID":"91a9ef47-2886-42de-877d-63a0f59e0248","Type":"ContainerDied","Data":"f3e3830135b4d7d57e7c8dfd469aff783565a2d1174343cb396a53ce1795b2ec"} Nov 25 11:15:07 crc kubenswrapper[4769]: I1125 11:15:07.942795 4769 util.go:48] "No ready sandbox for pod can be found. 
Nov 25 11:15:08 crc kubenswrapper[4769]: I1125 11:15:08.037952 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wwt28\" (UniqueName: \"kubernetes.io/projected/91a9ef47-2886-42de-877d-63a0f59e0248-kube-api-access-wwt28\") pod \"91a9ef47-2886-42de-877d-63a0f59e0248\" (UID: \"91a9ef47-2886-42de-877d-63a0f59e0248\") "
Nov 25 11:15:08 crc kubenswrapper[4769]: I1125 11:15:08.038118 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/91a9ef47-2886-42de-877d-63a0f59e0248-secret-volume\") pod \"91a9ef47-2886-42de-877d-63a0f59e0248\" (UID: \"91a9ef47-2886-42de-877d-63a0f59e0248\") "
Nov 25 11:15:08 crc kubenswrapper[4769]: I1125 11:15:08.038224 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/91a9ef47-2886-42de-877d-63a0f59e0248-config-volume\") pod \"91a9ef47-2886-42de-877d-63a0f59e0248\" (UID: \"91a9ef47-2886-42de-877d-63a0f59e0248\") "
Nov 25 11:15:08 crc kubenswrapper[4769]: I1125 11:15:08.039868 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/91a9ef47-2886-42de-877d-63a0f59e0248-config-volume" (OuterVolumeSpecName: "config-volume") pod "91a9ef47-2886-42de-877d-63a0f59e0248" (UID: "91a9ef47-2886-42de-877d-63a0f59e0248"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 11:15:08 crc kubenswrapper[4769]: I1125 11:15:08.048536 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/91a9ef47-2886-42de-877d-63a0f59e0248-kube-api-access-wwt28" (OuterVolumeSpecName: "kube-api-access-wwt28") pod "91a9ef47-2886-42de-877d-63a0f59e0248" (UID: "91a9ef47-2886-42de-877d-63a0f59e0248"). InnerVolumeSpecName "kube-api-access-wwt28". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 11:15:08 crc kubenswrapper[4769]: I1125 11:15:08.053033 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91a9ef47-2886-42de-877d-63a0f59e0248-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "91a9ef47-2886-42de-877d-63a0f59e0248" (UID: "91a9ef47-2886-42de-877d-63a0f59e0248"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 11:15:08 crc kubenswrapper[4769]: I1125 11:15:08.141819 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wwt28\" (UniqueName: \"kubernetes.io/projected/91a9ef47-2886-42de-877d-63a0f59e0248-kube-api-access-wwt28\") on node \"crc\" DevicePath \"\""
Nov 25 11:15:08 crc kubenswrapper[4769]: I1125 11:15:08.141858 4769 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/91a9ef47-2886-42de-877d-63a0f59e0248-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 25 11:15:08 crc kubenswrapper[4769]: I1125 11:15:08.141872 4769 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/91a9ef47-2886-42de-877d-63a0f59e0248-config-volume\") on node \"crc\" DevicePath \"\""
Nov 25 11:15:08 crc kubenswrapper[4769]: I1125 11:15:08.698665 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-vtkbl" event={"ID":"91a9ef47-2886-42de-877d-63a0f59e0248","Type":"ContainerDied","Data":"dacaeb821adfb0486ea05c03832cb3306e451d88904d0ae205c0f20461755672"}
Nov 25 11:15:08 crc kubenswrapper[4769]: I1125 11:15:08.698925 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dacaeb821adfb0486ea05c03832cb3306e451d88904d0ae205c0f20461755672"
Nov 25 11:15:08 crc kubenswrapper[4769]: I1125 11:15:08.698712 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-vtkbl"
Nov 25 11:15:09 crc kubenswrapper[4769]: I1125 11:15:09.040670 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401110-knhbq"]
Nov 25 11:15:09 crc kubenswrapper[4769]: I1125 11:15:09.055591 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401110-knhbq"]
Nov 25 11:15:10 crc kubenswrapper[4769]: I1125 11:15:10.257784 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="868a47b4-3718-435c-a357-bfef84fd4d51" path="/var/lib/kubelet/pods/868a47b4-3718-435c-a357-bfef84fd4d51/volumes"
Nov 25 11:15:42 crc kubenswrapper[4769]: I1125 11:15:42.076794 4769 generic.go:334] "Generic (PLEG): container finished" podID="6c5b029e-d497-4a8c-b2ee-4f42074aa74b" containerID="dea5af91b1133d945c8eb44a81e8869d8b26897882c39093eb5d7c561e5b486d" exitCode=0
Nov 25 11:15:42 crc kubenswrapper[4769]: I1125 11:15:42.076879 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pd2j4/crc-debug-x4jfv" event={"ID":"6c5b029e-d497-4a8c-b2ee-4f42074aa74b","Type":"ContainerDied","Data":"dea5af91b1133d945c8eb44a81e8869d8b26897882c39093eb5d7c561e5b486d"}
Nov 25 11:15:43 crc kubenswrapper[4769]: I1125 11:15:43.256806 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pd2j4/crc-debug-x4jfv"
Nov 25 11:15:43 crc kubenswrapper[4769]: I1125 11:15:43.303812 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-pd2j4/crc-debug-x4jfv"]
Nov 25 11:15:43 crc kubenswrapper[4769]: I1125 11:15:43.318089 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-pd2j4/crc-debug-x4jfv"]
Nov 25 11:15:43 crc kubenswrapper[4769]: I1125 11:15:43.346343 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9c2ss\" (UniqueName: \"kubernetes.io/projected/6c5b029e-d497-4a8c-b2ee-4f42074aa74b-kube-api-access-9c2ss\") pod \"6c5b029e-d497-4a8c-b2ee-4f42074aa74b\" (UID: \"6c5b029e-d497-4a8c-b2ee-4f42074aa74b\") "
Nov 25 11:15:43 crc kubenswrapper[4769]: I1125 11:15:43.346485 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6c5b029e-d497-4a8c-b2ee-4f42074aa74b-host\") pod \"6c5b029e-d497-4a8c-b2ee-4f42074aa74b\" (UID: \"6c5b029e-d497-4a8c-b2ee-4f42074aa74b\") "
Nov 25 11:15:43 crc kubenswrapper[4769]: I1125 11:15:43.346601 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6c5b029e-d497-4a8c-b2ee-4f42074aa74b-host" (OuterVolumeSpecName: "host") pod "6c5b029e-d497-4a8c-b2ee-4f42074aa74b" (UID: "6c5b029e-d497-4a8c-b2ee-4f42074aa74b"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 11:15:43 crc kubenswrapper[4769]: I1125 11:15:43.347322 4769 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6c5b029e-d497-4a8c-b2ee-4f42074aa74b-host\") on node \"crc\" DevicePath \"\""
Nov 25 11:15:43 crc kubenswrapper[4769]: I1125 11:15:43.351229 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c5b029e-d497-4a8c-b2ee-4f42074aa74b-kube-api-access-9c2ss" (OuterVolumeSpecName: "kube-api-access-9c2ss") pod "6c5b029e-d497-4a8c-b2ee-4f42074aa74b" (UID: "6c5b029e-d497-4a8c-b2ee-4f42074aa74b"). InnerVolumeSpecName "kube-api-access-9c2ss". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 11:15:43 crc kubenswrapper[4769]: I1125 11:15:43.450253 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9c2ss\" (UniqueName: \"kubernetes.io/projected/6c5b029e-d497-4a8c-b2ee-4f42074aa74b-kube-api-access-9c2ss\") on node \"crc\" DevicePath \"\""
Nov 25 11:15:44 crc kubenswrapper[4769]: I1125 11:15:44.103359 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="197d85c60cdfc71943f640ac25018afef929cb756eedda125be0aebbef056532"
Nov 25 11:15:44 crc kubenswrapper[4769]: I1125 11:15:44.103391 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pd2j4/crc-debug-x4jfv"
Nov 25 11:15:44 crc kubenswrapper[4769]: I1125 11:15:44.252372 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c5b029e-d497-4a8c-b2ee-4f42074aa74b" path="/var/lib/kubelet/pods/6c5b029e-d497-4a8c-b2ee-4f42074aa74b/volumes"
Nov 25 11:15:44 crc kubenswrapper[4769]: I1125 11:15:44.538901 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-pd2j4/crc-debug-bc5fx"]
Nov 25 11:15:44 crc kubenswrapper[4769]: E1125 11:15:44.539664 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c5b029e-d497-4a8c-b2ee-4f42074aa74b" containerName="container-00"
Nov 25 11:15:44 crc kubenswrapper[4769]: I1125 11:15:44.539681 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c5b029e-d497-4a8c-b2ee-4f42074aa74b" containerName="container-00"
Nov 25 11:15:44 crc kubenswrapper[4769]: E1125 11:15:44.539747 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91a9ef47-2886-42de-877d-63a0f59e0248" containerName="collect-profiles"
Nov 25 11:15:44 crc kubenswrapper[4769]: I1125 11:15:44.539756 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="91a9ef47-2886-42de-877d-63a0f59e0248" containerName="collect-profiles"
Nov 25 11:15:44 crc kubenswrapper[4769]: I1125 11:15:44.540084 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c5b029e-d497-4a8c-b2ee-4f42074aa74b" containerName="container-00"
Nov 25 11:15:44 crc kubenswrapper[4769]: I1125 11:15:44.540122 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="91a9ef47-2886-42de-877d-63a0f59e0248" containerName="collect-profiles"
Nov 25 11:15:44 crc kubenswrapper[4769]: I1125 11:15:44.541095 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pd2j4/crc-debug-bc5fx"
Nov 25 11:15:44 crc kubenswrapper[4769]: I1125 11:15:44.543274 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-pd2j4"/"default-dockercfg-hl7sn"
Nov 25 11:15:44 crc kubenswrapper[4769]: I1125 11:15:44.678507 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n2j4r\" (UniqueName: \"kubernetes.io/projected/ff31be3d-83e8-4419-ac05-e11ef5af8536-kube-api-access-n2j4r\") pod \"crc-debug-bc5fx\" (UID: \"ff31be3d-83e8-4419-ac05-e11ef5af8536\") " pod="openshift-must-gather-pd2j4/crc-debug-bc5fx"
Nov 25 11:15:44 crc kubenswrapper[4769]: I1125 11:15:44.678808 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ff31be3d-83e8-4419-ac05-e11ef5af8536-host\") pod \"crc-debug-bc5fx\" (UID: \"ff31be3d-83e8-4419-ac05-e11ef5af8536\") " pod="openshift-must-gather-pd2j4/crc-debug-bc5fx"
Nov 25 11:15:44 crc kubenswrapper[4769]: I1125 11:15:44.781190 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n2j4r\" (UniqueName: \"kubernetes.io/projected/ff31be3d-83e8-4419-ac05-e11ef5af8536-kube-api-access-n2j4r\") pod \"crc-debug-bc5fx\" (UID: \"ff31be3d-83e8-4419-ac05-e11ef5af8536\") " pod="openshift-must-gather-pd2j4/crc-debug-bc5fx"
Nov 25 11:15:44 crc kubenswrapper[4769]: I1125 11:15:44.781426 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ff31be3d-83e8-4419-ac05-e11ef5af8536-host\") pod \"crc-debug-bc5fx\" (UID: \"ff31be3d-83e8-4419-ac05-e11ef5af8536\") " pod="openshift-must-gather-pd2j4/crc-debug-bc5fx"
Nov 25 11:15:44 crc kubenswrapper[4769]: I1125 11:15:44.781580 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ff31be3d-83e8-4419-ac05-e11ef5af8536-host\") pod \"crc-debug-bc5fx\" (UID: \"ff31be3d-83e8-4419-ac05-e11ef5af8536\") " pod="openshift-must-gather-pd2j4/crc-debug-bc5fx"
Nov 25 11:15:44 crc kubenswrapper[4769]: I1125 11:15:44.803520 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n2j4r\" (UniqueName: \"kubernetes.io/projected/ff31be3d-83e8-4419-ac05-e11ef5af8536-kube-api-access-n2j4r\") pod \"crc-debug-bc5fx\" (UID: \"ff31be3d-83e8-4419-ac05-e11ef5af8536\") " pod="openshift-must-gather-pd2j4/crc-debug-bc5fx"
Nov 25 11:15:44 crc kubenswrapper[4769]: I1125 11:15:44.860815 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-pd2j4/crc-debug-bc5fx" Nov 25 11:15:45 crc kubenswrapper[4769]: I1125 11:15:45.119540 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pd2j4/crc-debug-bc5fx" event={"ID":"ff31be3d-83e8-4419-ac05-e11ef5af8536","Type":"ContainerStarted","Data":"15f3a6be01cccb914a07be030c235ded28688a56b16d4b0ed98aaeabb11c7df9"} Nov 25 11:15:46 crc kubenswrapper[4769]: I1125 11:15:46.229711 4769 generic.go:334] "Generic (PLEG): container finished" podID="ff31be3d-83e8-4419-ac05-e11ef5af8536" containerID="8297aa2bbd49704be3acbc5d78d75659bc41a86943208eb6c500eae3be397506" exitCode=0 Nov 25 11:15:46 crc kubenswrapper[4769]: I1125 11:15:46.230032 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pd2j4/crc-debug-bc5fx" event={"ID":"ff31be3d-83e8-4419-ac05-e11ef5af8536","Type":"ContainerDied","Data":"8297aa2bbd49704be3acbc5d78d75659bc41a86943208eb6c500eae3be397506"} Nov 25 11:15:47 crc kubenswrapper[4769]: I1125 11:15:47.380106 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pd2j4/crc-debug-bc5fx" Nov 25 11:15:47 crc kubenswrapper[4769]: I1125 11:15:47.461287 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n2j4r\" (UniqueName: \"kubernetes.io/projected/ff31be3d-83e8-4419-ac05-e11ef5af8536-kube-api-access-n2j4r\") pod \"ff31be3d-83e8-4419-ac05-e11ef5af8536\" (UID: \"ff31be3d-83e8-4419-ac05-e11ef5af8536\") " Nov 25 11:15:47 crc kubenswrapper[4769]: I1125 11:15:47.461976 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ff31be3d-83e8-4419-ac05-e11ef5af8536-host\") pod \"ff31be3d-83e8-4419-ac05-e11ef5af8536\" (UID: \"ff31be3d-83e8-4419-ac05-e11ef5af8536\") " Nov 25 11:15:47 crc kubenswrapper[4769]: I1125 11:15:47.462160 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ff31be3d-83e8-4419-ac05-e11ef5af8536-host" (OuterVolumeSpecName: "host") pod "ff31be3d-83e8-4419-ac05-e11ef5af8536" (UID: "ff31be3d-83e8-4419-ac05-e11ef5af8536"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 11:15:47 crc kubenswrapper[4769]: I1125 11:15:47.462664 4769 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ff31be3d-83e8-4419-ac05-e11ef5af8536-host\") on node \"crc\" DevicePath \"\"" Nov 25 11:15:47 crc kubenswrapper[4769]: I1125 11:15:47.473659 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff31be3d-83e8-4419-ac05-e11ef5af8536-kube-api-access-n2j4r" (OuterVolumeSpecName: "kube-api-access-n2j4r") pod "ff31be3d-83e8-4419-ac05-e11ef5af8536" (UID: "ff31be3d-83e8-4419-ac05-e11ef5af8536"). InnerVolumeSpecName "kube-api-access-n2j4r". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:15:47 crc kubenswrapper[4769]: I1125 11:15:47.564952 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n2j4r\" (UniqueName: \"kubernetes.io/projected/ff31be3d-83e8-4419-ac05-e11ef5af8536-kube-api-access-n2j4r\") on node \"crc\" DevicePath \"\"" Nov 25 11:15:48 crc kubenswrapper[4769]: I1125 11:15:48.096333 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-pd2j4/crc-debug-bc5fx"] Nov 25 11:15:48 crc kubenswrapper[4769]: I1125 11:15:48.111641 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-pd2j4/crc-debug-bc5fx"] Nov 25 11:15:48 crc kubenswrapper[4769]: I1125 11:15:48.254514 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ff31be3d-83e8-4419-ac05-e11ef5af8536" path="/var/lib/kubelet/pods/ff31be3d-83e8-4419-ac05-e11ef5af8536/volumes" Nov 25 11:15:48 crc kubenswrapper[4769]: I1125 11:15:48.258092 4769 scope.go:117] "RemoveContainer" containerID="8297aa2bbd49704be3acbc5d78d75659bc41a86943208eb6c500eae3be397506" Nov 25 11:15:48 crc kubenswrapper[4769]: I1125 11:15:48.258310 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pd2j4/crc-debug-bc5fx" Nov 25 11:15:49 crc kubenswrapper[4769]: I1125 11:15:49.337158 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-pd2j4/crc-debug-ww6jp"] Nov 25 11:15:49 crc kubenswrapper[4769]: E1125 11:15:49.338218 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff31be3d-83e8-4419-ac05-e11ef5af8536" containerName="container-00" Nov 25 11:15:49 crc kubenswrapper[4769]: I1125 11:15:49.338231 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff31be3d-83e8-4419-ac05-e11ef5af8536" containerName="container-00" Nov 25 11:15:49 crc kubenswrapper[4769]: I1125 11:15:49.338484 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff31be3d-83e8-4419-ac05-e11ef5af8536" containerName="container-00" Nov 25 11:15:49 crc kubenswrapper[4769]: I1125 11:15:49.339327 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-pd2j4/crc-debug-ww6jp" Nov 25 11:15:49 crc kubenswrapper[4769]: I1125 11:15:49.342349 4769 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-pd2j4"/"default-dockercfg-hl7sn" Nov 25 11:15:49 crc kubenswrapper[4769]: I1125 11:15:49.418385 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/dc574b28-b81a-4c11-a88d-2b6aa034922f-host\") pod \"crc-debug-ww6jp\" (UID: \"dc574b28-b81a-4c11-a88d-2b6aa034922f\") " pod="openshift-must-gather-pd2j4/crc-debug-ww6jp" Nov 25 11:15:49 crc kubenswrapper[4769]: I1125 11:15:49.418596 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8h4h5\" (UniqueName: \"kubernetes.io/projected/dc574b28-b81a-4c11-a88d-2b6aa034922f-kube-api-access-8h4h5\") pod \"crc-debug-ww6jp\" (UID: \"dc574b28-b81a-4c11-a88d-2b6aa034922f\") " pod="openshift-must-gather-pd2j4/crc-debug-ww6jp" Nov 25 11:15:49 crc kubenswrapper[4769]: I1125 11:15:49.520760 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/dc574b28-b81a-4c11-a88d-2b6aa034922f-host\") pod \"crc-debug-ww6jp\" (UID: \"dc574b28-b81a-4c11-a88d-2b6aa034922f\") " pod="openshift-must-gather-pd2j4/crc-debug-ww6jp" Nov 25 11:15:49 crc kubenswrapper[4769]: I1125 11:15:49.520906 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8h4h5\" (UniqueName: \"kubernetes.io/projected/dc574b28-b81a-4c11-a88d-2b6aa034922f-kube-api-access-8h4h5\") pod \"crc-debug-ww6jp\" (UID: \"dc574b28-b81a-4c11-a88d-2b6aa034922f\") " pod="openshift-must-gather-pd2j4/crc-debug-ww6jp" Nov 25 11:15:49 crc kubenswrapper[4769]: I1125 11:15:49.520981 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/dc574b28-b81a-4c11-a88d-2b6aa034922f-host\") pod \"crc-debug-ww6jp\" (UID: \"dc574b28-b81a-4c11-a88d-2b6aa034922f\") " pod="openshift-must-gather-pd2j4/crc-debug-ww6jp" Nov 25 11:15:49 crc kubenswrapper[4769]: I1125 11:15:49.539942 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8h4h5\" (UniqueName: \"kubernetes.io/projected/dc574b28-b81a-4c11-a88d-2b6aa034922f-kube-api-access-8h4h5\") pod \"crc-debug-ww6jp\" (UID: \"dc574b28-b81a-4c11-a88d-2b6aa034922f\") " pod="openshift-must-gather-pd2j4/crc-debug-ww6jp" Nov 25 11:15:49 crc kubenswrapper[4769]: I1125 11:15:49.664418 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-pd2j4/crc-debug-ww6jp" Nov 25 11:15:50 crc kubenswrapper[4769]: I1125 11:15:50.281917 4769 generic.go:334] "Generic (PLEG): container finished" podID="dc574b28-b81a-4c11-a88d-2b6aa034922f" containerID="da0327adaa6aaf801b6bbb5a4c86d1dd3575bbb0238cb7ea1df95a6f8e6f7707" exitCode=0 Nov 25 11:15:50 crc kubenswrapper[4769]: I1125 11:15:50.282003 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pd2j4/crc-debug-ww6jp" event={"ID":"dc574b28-b81a-4c11-a88d-2b6aa034922f","Type":"ContainerDied","Data":"da0327adaa6aaf801b6bbb5a4c86d1dd3575bbb0238cb7ea1df95a6f8e6f7707"} Nov 25 11:15:50 crc kubenswrapper[4769]: I1125 11:15:50.282376 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pd2j4/crc-debug-ww6jp" event={"ID":"dc574b28-b81a-4c11-a88d-2b6aa034922f","Type":"ContainerStarted","Data":"99ffb5facec89987eaf5c0e5efeecfac184a3a7051e15b09102bcf394a25af94"} Nov 25 11:15:50 crc kubenswrapper[4769]: I1125 11:15:50.331528 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-pd2j4/crc-debug-ww6jp"] Nov 25 11:15:50 crc kubenswrapper[4769]: I1125 11:15:50.345396 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-pd2j4/crc-debug-ww6jp"] Nov 25 11:15:51 crc kubenswrapper[4769]: I1125 11:15:51.418753 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pd2j4/crc-debug-ww6jp" Nov 25 11:15:51 crc kubenswrapper[4769]: I1125 11:15:51.567403 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8h4h5\" (UniqueName: \"kubernetes.io/projected/dc574b28-b81a-4c11-a88d-2b6aa034922f-kube-api-access-8h4h5\") pod \"dc574b28-b81a-4c11-a88d-2b6aa034922f\" (UID: \"dc574b28-b81a-4c11-a88d-2b6aa034922f\") " Nov 25 11:15:51 crc kubenswrapper[4769]: I1125 11:15:51.567441 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/dc574b28-b81a-4c11-a88d-2b6aa034922f-host\") pod \"dc574b28-b81a-4c11-a88d-2b6aa034922f\" (UID: \"dc574b28-b81a-4c11-a88d-2b6aa034922f\") " Nov 25 11:15:51 crc kubenswrapper[4769]: I1125 11:15:51.568032 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/dc574b28-b81a-4c11-a88d-2b6aa034922f-host" (OuterVolumeSpecName: "host") pod "dc574b28-b81a-4c11-a88d-2b6aa034922f" (UID: "dc574b28-b81a-4c11-a88d-2b6aa034922f"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 11:15:51 crc kubenswrapper[4769]: I1125 11:15:51.573389 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc574b28-b81a-4c11-a88d-2b6aa034922f-kube-api-access-8h4h5" (OuterVolumeSpecName: "kube-api-access-8h4h5") pod "dc574b28-b81a-4c11-a88d-2b6aa034922f" (UID: "dc574b28-b81a-4c11-a88d-2b6aa034922f"). InnerVolumeSpecName "kube-api-access-8h4h5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:15:51 crc kubenswrapper[4769]: I1125 11:15:51.670804 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8h4h5\" (UniqueName: \"kubernetes.io/projected/dc574b28-b81a-4c11-a88d-2b6aa034922f-kube-api-access-8h4h5\") on node \"crc\" DevicePath \"\"" Nov 25 11:15:51 crc kubenswrapper[4769]: I1125 11:15:51.670842 4769 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/dc574b28-b81a-4c11-a88d-2b6aa034922f-host\") on node \"crc\" DevicePath \"\"" Nov 25 11:15:52 crc kubenswrapper[4769]: I1125 11:15:52.053510 4769 scope.go:117] "RemoveContainer" containerID="6a0f31a303cbac1952aaceb147ea5c206337f05102ed0c6483a942c0b3bd912a" Nov 25 11:15:52 crc kubenswrapper[4769]: I1125 11:15:52.078881 4769 scope.go:117] "RemoveContainer" containerID="752cb86da8fecd9cd08ae2eb2fb21f293e404c471b4243d3fc085b86ae4ad013" Nov 25 11:15:52 crc kubenswrapper[4769]: I1125 11:15:52.136321 4769 scope.go:117] "RemoveContainer" containerID="d0fc9c539b2bf9a3d924a390bfa07f112b874862d2ba3b2ce19a8ff8b60acc05" Nov 25 11:15:52 crc kubenswrapper[4769]: I1125 11:15:52.185165 4769 scope.go:117] "RemoveContainer" containerID="667cf0a9aad845523b83761ec7e6b90f49da63f32b87d16da5405b0dc2032a15" Nov 25 11:15:52 crc kubenswrapper[4769]: I1125 11:15:52.213346 4769 scope.go:117] "RemoveContainer" containerID="2846aa87a8fb70c18047658bbab361766ef69a9d93f19ad2dc263e2db41e2321" Nov 25 11:15:52 crc kubenswrapper[4769]: I1125 11:15:52.239171 4769 scope.go:117] "RemoveContainer" containerID="86be9b2a0082a70d05cd99924bcb2f3f2f50320fc099212413129a320bbc6206" Nov 25 11:15:52 crc kubenswrapper[4769]: I1125 11:15:52.276401 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dc574b28-b81a-4c11-a88d-2b6aa034922f" path="/var/lib/kubelet/pods/dc574b28-b81a-4c11-a88d-2b6aa034922f/volumes" Nov 25 11:15:52 crc kubenswrapper[4769]: I1125 11:15:52.276586 4769 scope.go:117] "RemoveContainer" containerID="d19f0987c306e9ccf6983d6f68aa32b10622159f9e820112ac1eef4fdd935704" Nov 25 11:15:52 crc kubenswrapper[4769]: I1125 11:15:52.310577 4769 scope.go:117] "RemoveContainer" containerID="1c744ee3bfbaaa52bb9931f19c5a7da7ce606ce6963f8f5453d6d0c393f93146" Nov 25 11:15:52 crc kubenswrapper[4769]: I1125 11:15:52.332045 4769 scope.go:117] "RemoveContainer" containerID="da0327adaa6aaf801b6bbb5a4c86d1dd3575bbb0238cb7ea1df95a6f8e6f7707" Nov 25 11:15:52 crc kubenswrapper[4769]: I1125 11:15:52.332187 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-pd2j4/crc-debug-ww6jp" Nov 25 11:15:52 crc kubenswrapper[4769]: I1125 11:15:52.396509 4769 scope.go:117] "RemoveContainer" containerID="644c2c018e722cfbb1475b24513cb7a92abcf292944dbd52c593c76c72401d3d" Nov 25 11:16:17 crc kubenswrapper[4769]: I1125 11:16:17.854119 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_19ab0e31-61ae-4868-8a6a-77354302637c/aodh-api/0.log" Nov 25 11:16:18 crc kubenswrapper[4769]: I1125 11:16:18.094318 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_19ab0e31-61ae-4868-8a6a-77354302637c/aodh-evaluator/0.log" Nov 25 11:16:18 crc kubenswrapper[4769]: I1125 11:16:18.104105 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_19ab0e31-61ae-4868-8a6a-77354302637c/aodh-notifier/0.log" Nov 25 11:16:18 crc kubenswrapper[4769]: I1125 11:16:18.124918 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_19ab0e31-61ae-4868-8a6a-77354302637c/aodh-listener/0.log" Nov 25 11:16:18 crc kubenswrapper[4769]: I1125 11:16:18.312444 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-56bfd676cd-c9dgn_28aedc7b-6594-4c9d-9b2e-ea66937919f2/barbican-api/0.log" Nov 25 11:16:18 crc kubenswrapper[4769]: I1125 11:16:18.336552 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-56bfd676cd-c9dgn_28aedc7b-6594-4c9d-9b2e-ea66937919f2/barbican-api-log/0.log" Nov 25 11:16:18 crc kubenswrapper[4769]: I1125 11:16:18.648806 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-7ff7bc75d-dxtln_e7bfc1de-a2ad-4c42-ab54-a4df40fdd797/barbican-keystone-listener/0.log" Nov 25 11:16:18 crc kubenswrapper[4769]: I1125 11:16:18.742009 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-74bd96895f-7gn8z_1d5d3ba2-441b-4a94-974d-142811c1d2b4/barbican-worker/0.log" Nov 25 11:16:18 crc kubenswrapper[4769]: I1125 11:16:18.850911 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-74bd96895f-7gn8z_1d5d3ba2-441b-4a94-974d-142811c1d2b4/barbican-worker-log/0.log" Nov 25 11:16:18 crc kubenswrapper[4769]: I1125 11:16:18.863429 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-7ff7bc75d-dxtln_e7bfc1de-a2ad-4c42-ab54-a4df40fdd797/barbican-keystone-listener-log/0.log" Nov 25 11:16:19 crc kubenswrapper[4769]: I1125 11:16:19.010435 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-dffv6_77bb348e-56c3-4597-917d-5d918bfad3ca/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 11:16:19 crc kubenswrapper[4769]: I1125 11:16:19.216341 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_eb894707-cfa1-4716-a991-31992d8cff88/ceilometer-central-agent/1.log" Nov 25 11:16:19 crc kubenswrapper[4769]: I1125 11:16:19.290901 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_eb894707-cfa1-4716-a991-31992d8cff88/ceilometer-central-agent/0.log" Nov 25 11:16:19 crc kubenswrapper[4769]: I1125 11:16:19.299919 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_eb894707-cfa1-4716-a991-31992d8cff88/ceilometer-notification-agent/0.log" Nov 25 11:16:19 crc kubenswrapper[4769]: I1125 11:16:19.406030 4769 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ceilometer-0_eb894707-cfa1-4716-a991-31992d8cff88/proxy-httpd/0.log" Nov 25 11:16:19 crc kubenswrapper[4769]: I1125 11:16:19.458475 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_eb894707-cfa1-4716-a991-31992d8cff88/sg-core/0.log" Nov 25 11:16:19 crc kubenswrapper[4769]: I1125 11:16:19.628997 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_87066e63-7bf0-47dd-a601-88c880a1b5e4/cinder-api/0.log" Nov 25 11:16:19 crc kubenswrapper[4769]: I1125 11:16:19.721241 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_87066e63-7bf0-47dd-a601-88c880a1b5e4/cinder-api-log/0.log" Nov 25 11:16:19 crc kubenswrapper[4769]: I1125 11:16:19.824600 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_18a4a142-bb1b-4e44-9110-6a6e15b86b0d/cinder-scheduler/1.log" Nov 25 11:16:19 crc kubenswrapper[4769]: I1125 11:16:19.876058 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_18a4a142-bb1b-4e44-9110-6a6e15b86b0d/cinder-scheduler/0.log" Nov 25 11:16:19 crc kubenswrapper[4769]: I1125 11:16:19.949101 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_18a4a142-bb1b-4e44-9110-6a6e15b86b0d/probe/0.log" Nov 25 11:16:20 crc kubenswrapper[4769]: I1125 11:16:20.054424 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-56djw_ec573501-99b0-4833-828d-ac951684f714/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 11:16:20 crc kubenswrapper[4769]: I1125 11:16:20.205005 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-nwds5_fcf2a670-6bb0-4fbd-bb3a-a681699fec4a/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 11:16:20 crc kubenswrapper[4769]: I1125 11:16:20.740998 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-bb85b8995-p89c2_c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0/init/0.log" Nov 25 11:16:20 crc kubenswrapper[4769]: I1125 11:16:20.982486 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-bb85b8995-p89c2_c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0/init/0.log" Nov 25 11:16:21 crc kubenswrapper[4769]: I1125 11:16:21.108219 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-xknzf_d10dc8b2-044a-4829-b8bd-54559166b436/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 11:16:21 crc kubenswrapper[4769]: I1125 11:16:21.109800 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-bb85b8995-p89c2_c6c5e56a-cbe5-4d31-86a5-96cb5761c5b0/dnsmasq-dns/0.log" Nov 25 11:16:21 crc kubenswrapper[4769]: I1125 11:16:21.270209 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_e4dc076f-e68c-4c7e-ae16-f8576de44f48/glance-httpd/0.log" Nov 25 11:16:21 crc kubenswrapper[4769]: I1125 11:16:21.348051 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_e4dc076f-e68c-4c7e-ae16-f8576de44f48/glance-log/0.log" Nov 25 11:16:21 crc kubenswrapper[4769]: I1125 11:16:21.467228 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_f01554f0-8651-4121-99d2-65725f73ad2b/glance-httpd/0.log" Nov 25 
11:16:21 crc kubenswrapper[4769]: I1125 11:16:21.678380 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_f01554f0-8651-4121-99d2-65725f73ad2b/glance-log/0.log" Nov 25 11:16:22 crc kubenswrapper[4769]: I1125 11:16:22.008933 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-engine-5479947c5b-lpmd8_6cea603a-b499-42a3-a881-c783ecb978a5/heat-engine/0.log" Nov 25 11:16:22 crc kubenswrapper[4769]: I1125 11:16:22.426571 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-api-54cdcc7844-z2pdq_c54a9d3c-ee22-4f2a-9988-14600cbaf86d/heat-api/0.log" Nov 25 11:16:22 crc kubenswrapper[4769]: I1125 11:16:22.465300 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-cfnapi-55bb67dcdf-bnzxj_40323897-a2ff-4536-af1c-e0777ba05b89/heat-cfnapi/0.log" Nov 25 11:16:23 crc kubenswrapper[4769]: I1125 11:16:23.077721 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-v8z6j_cfbf9a07-a076-48a8-a458-743efaef316d/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 11:16:23 crc kubenswrapper[4769]: I1125 11:16:23.127607 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-m8dzd_78067229-e9ea-41be-ae04-26579218f6d1/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 11:16:23 crc kubenswrapper[4769]: I1125 11:16:23.163550 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29401141-t6zhj_c7d20fa4-f9ca-4d1f-b3fe-7a2d2f98ebe9/keystone-cron/0.log" Nov 25 11:16:23 crc kubenswrapper[4769]: I1125 11:16:23.365640 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9/kube-state-metrics/3.log" Nov 25 11:16:23 crc kubenswrapper[4769]: I1125 11:16:23.537450 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_f8ee95ae-2e66-49b7-9c2d-c5feb22dd5a9/kube-state-metrics/2.log" Nov 25 11:16:23 crc kubenswrapper[4769]: I1125 11:16:23.754528 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-7k57k_1d2d8eea-8aae-4e92-8d69-630eec1d95b9/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 11:16:23 crc kubenswrapper[4769]: I1125 11:16:23.833925 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_logging-edpm-deployment-openstack-edpm-ipam-mrb5n_1a1e04d8-9fba-4f14-a9ac-845e6bd82fcb/logging-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 11:16:24 crc kubenswrapper[4769]: I1125 11:16:24.079195 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mysqld-exporter-0_f7b3065e-13cc-4b73-a8e8-7c0c8a07379d/mysqld-exporter/0.log" Nov 25 11:16:24 crc kubenswrapper[4769]: I1125 11:16:24.508712 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5c9685d5f7-xbblq_bbd368c8-da9f-4504-89e4-ffc922d67c32/neutron-httpd/0.log" Nov 25 11:16:24 crc kubenswrapper[4769]: I1125 11:16:24.652554 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5c9685d5f7-xbblq_bbd368c8-da9f-4504-89e4-ffc922d67c32/neutron-api/0.log" Nov 25 11:16:24 crc kubenswrapper[4769]: I1125 11:16:24.724943 4769 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-mrj75_84dc9d02-ba7e-4b1e-8b1b-24654611c9ba/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 11:16:25 crc kubenswrapper[4769]: I1125 11:16:25.421290 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_3a4d6dd4-fef8-443c-9266-9641e672100e/nova-api-log/0.log" Nov 25 11:16:25 crc kubenswrapper[4769]: I1125 11:16:25.492404 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_85732feb-40a3-4155-aa20-fd8d6207b357/nova-cell0-conductor-conductor/0.log" Nov 25 11:16:25 crc kubenswrapper[4769]: I1125 11:16:25.826447 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_3a4d6dd4-fef8-443c-9266-9641e672100e/nova-api-api/0.log" Nov 25 11:16:25 crc kubenswrapper[4769]: I1125 11:16:25.964627 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_f5e88fd3-747e-4e1c-8825-bacafa9d3530/nova-cell1-conductor-conductor/0.log" Nov 25 11:16:26 crc kubenswrapper[4769]: I1125 11:16:26.166714 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_d4eb5499-66c0-466e-92ab-1f2a223d1f35/nova-cell1-novncproxy-novncproxy/0.log" Nov 25 11:16:26 crc kubenswrapper[4769]: I1125 11:16:26.432183 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-r4zms_88ab5006-f5a9-4c7c-b4c9-7b74e12e0ad7/nova-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 11:16:26 crc kubenswrapper[4769]: I1125 11:16:26.620882 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_7c17c0cb-e73b-466d-8fae-ad581561fcb0/nova-metadata-log/0.log" Nov 25 11:16:26 crc kubenswrapper[4769]: I1125 11:16:26.771853 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-d4dbd5b55-g2prp_43d17ac5-45bb-488c-b28e-d076b6f279d8/keystone-api/0.log" Nov 25 11:16:26 crc kubenswrapper[4769]: I1125 11:16:26.944389 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_8e360825-f56f-4e69-9e17-c9e78f295267/mysql-bootstrap/0.log" Nov 25 11:16:27 crc kubenswrapper[4769]: I1125 11:16:27.051271 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_25cfece4-fd70-4c52-88c9-f29be558844f/nova-scheduler-scheduler/0.log" Nov 25 11:16:27 crc kubenswrapper[4769]: I1125 11:16:27.254077 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_8e360825-f56f-4e69-9e17-c9e78f295267/galera/0.log" Nov 25 11:16:27 crc kubenswrapper[4769]: I1125 11:16:27.297851 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_8e360825-f56f-4e69-9e17-c9e78f295267/mysql-bootstrap/0.log" Nov 25 11:16:27 crc kubenswrapper[4769]: I1125 11:16:27.501646 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_52438bf8-8800-4078-bc88-63033a83dd2e/mysql-bootstrap/0.log" Nov 25 11:16:27 crc kubenswrapper[4769]: I1125 11:16:27.688556 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_52438bf8-8800-4078-bc88-63033a83dd2e/mysql-bootstrap/0.log" Nov 25 11:16:27 crc kubenswrapper[4769]: I1125 11:16:27.756239 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_52438bf8-8800-4078-bc88-63033a83dd2e/galera/0.log" Nov 25 11:16:27 crc 
kubenswrapper[4769]: I1125 11:16:27.761473 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_52438bf8-8800-4078-bc88-63033a83dd2e/galera/1.log" Nov 25 11:16:27 crc kubenswrapper[4769]: I1125 11:16:27.992222 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_be068e15-9a8b-472c-9a66-8ee06cf2491f/openstackclient/0.log" Nov 25 11:16:28 crc kubenswrapper[4769]: I1125 11:16:28.210228 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-whsjc_46820746-3540-4da9-9bf3-b43df2d4d66d/openstack-network-exporter/0.log" Nov 25 11:16:28 crc kubenswrapper[4769]: I1125 11:16:28.316599 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-2qznl_2d073f77-c21a-401e-8fd2-e6f08d2bcf17/ovsdb-server-init/0.log" Nov 25 11:16:28 crc kubenswrapper[4769]: I1125 11:16:28.543767 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-2qznl_2d073f77-c21a-401e-8fd2-e6f08d2bcf17/ovs-vswitchd/0.log" Nov 25 11:16:28 crc kubenswrapper[4769]: I1125 11:16:28.593673 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-2qznl_2d073f77-c21a-401e-8fd2-e6f08d2bcf17/ovsdb-server/0.log" Nov 25 11:16:28 crc kubenswrapper[4769]: I1125 11:16:28.612762 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-2qznl_2d073f77-c21a-401e-8fd2-e6f08d2bcf17/ovsdb-server-init/0.log" Nov 25 11:16:28 crc kubenswrapper[4769]: I1125 11:16:28.767278 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-tnz6t_a94c59b5-e672-4d10-a090-89fa82afc1f3/ovn-controller/0.log" Nov 25 11:16:28 crc kubenswrapper[4769]: I1125 11:16:28.856931 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_7c17c0cb-e73b-466d-8fae-ad581561fcb0/nova-metadata-metadata/0.log" Nov 25 11:16:29 crc kubenswrapper[4769]: I1125 11:16:29.001655 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-dxprm_92aecc12-4568-4b69-bda9-58101a2b6083/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 11:16:29 crc kubenswrapper[4769]: I1125 11:16:29.151059 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_5a0cfe48-e8db-4d53-b3a8-68c5e724538a/openstack-network-exporter/0.log" Nov 25 11:16:29 crc kubenswrapper[4769]: I1125 11:16:29.155001 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_5a0cfe48-e8db-4d53-b3a8-68c5e724538a/ovn-northd/0.log" Nov 25 11:16:29 crc kubenswrapper[4769]: I1125 11:16:29.283207 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_8c646d86-e48c-4d40-8370-1736c484875f/memcached/0.log" Nov 25 11:16:29 crc kubenswrapper[4769]: I1125 11:16:29.332458 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_80bf45ba-a6bb-4e24-86c8-1234bb9f61f0/openstack-network-exporter/0.log" Nov 25 11:16:29 crc kubenswrapper[4769]: I1125 11:16:29.402844 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_80bf45ba-a6bb-4e24-86c8-1234bb9f61f0/ovsdbserver-nb/0.log" Nov 25 11:16:29 crc kubenswrapper[4769]: I1125 11:16:29.489109 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_949195eb-241c-44a0-a5ba-21fadd596967/openstack-network-exporter/0.log" Nov 25 11:16:29 crc 
kubenswrapper[4769]: I1125 11:16:29.520989 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_949195eb-241c-44a0-a5ba-21fadd596967/ovsdbserver-sb/0.log" Nov 25 11:16:29 crc kubenswrapper[4769]: I1125 11:16:29.661717 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-68856897d4-664jr_e3e7e492-3f47-4688-b589-0c61adf29521/placement-api/0.log" Nov 25 11:16:29 crc kubenswrapper[4769]: I1125 11:16:29.749900 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_8ef4c37f-e3dc-4994-92bf-b41e7c215ef1/init-config-reloader/0.log" Nov 25 11:16:29 crc kubenswrapper[4769]: I1125 11:16:29.774302 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-68856897d4-664jr_e3e7e492-3f47-4688-b589-0c61adf29521/placement-log/0.log" Nov 25 11:16:29 crc kubenswrapper[4769]: I1125 11:16:29.988170 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_8ef4c37f-e3dc-4994-92bf-b41e7c215ef1/config-reloader/0.log" Nov 25 11:16:30 crc kubenswrapper[4769]: I1125 11:16:30.011283 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_8ef4c37f-e3dc-4994-92bf-b41e7c215ef1/thanos-sidecar/0.log" Nov 25 11:16:30 crc kubenswrapper[4769]: I1125 11:16:30.016540 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_8ef4c37f-e3dc-4994-92bf-b41e7c215ef1/init-config-reloader/0.log" Nov 25 11:16:30 crc kubenswrapper[4769]: I1125 11:16:30.019678 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_8ef4c37f-e3dc-4994-92bf-b41e7c215ef1/prometheus/0.log" Nov 25 11:16:30 crc kubenswrapper[4769]: I1125 11:16:30.173548 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_df64d497-1d94-46ba-b773-da7ade77177a/setup-container/0.log" Nov 25 11:16:30 crc kubenswrapper[4769]: I1125 11:16:30.356792 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_df64d497-1d94-46ba-b773-da7ade77177a/rabbitmq/0.log" Nov 25 11:16:30 crc kubenswrapper[4769]: I1125 11:16:30.358417 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_df64d497-1d94-46ba-b773-da7ade77177a/setup-container/0.log" Nov 25 11:16:30 crc kubenswrapper[4769]: I1125 11:16:30.407642 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_8b992919-0b8a-425d-9e1a-aec914a91965/setup-container/0.log" Nov 25 11:16:31 crc kubenswrapper[4769]: I1125 11:16:31.063048 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_8b992919-0b8a-425d-9e1a-aec914a91965/setup-container/0.log" Nov 25 11:16:31 crc kubenswrapper[4769]: I1125 11:16:31.101276 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_8b992919-0b8a-425d-9e1a-aec914a91965/rabbitmq/0.log" Nov 25 11:16:31 crc kubenswrapper[4769]: I1125 11:16:31.143601 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-4snp4_3ad5b758-8fdd-4274-887d-34859eb94736/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 11:16:31 crc kubenswrapper[4769]: I1125 11:16:31.265713 4769 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-ndp8l_080006ea-2bcf-4a74-9daa-6e11cf96a8e2/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 11:16:31 crc kubenswrapper[4769]: I1125 11:16:31.328231 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-n65q2_56107e4b-15d6-4358-ac81-c6bcc8fcc737/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 11:16:31 crc kubenswrapper[4769]: I1125 11:16:31.378781 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-r8582_d3cada76-b876-4f41-b8de-dc02456762e1/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 11:16:31 crc kubenswrapper[4769]: I1125 11:16:31.502487 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-7tjkl_6dde7865-02ef-4557-948d-c5fb0ddf73ed/ssh-known-hosts-edpm-deployment/0.log" Nov 25 11:16:31 crc kubenswrapper[4769]: I1125 11:16:31.644156 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-657bc78fc7-ccwr8_5aee4062-7d9d-44f0-a07c-6f0704946803/proxy-server/0.log" Nov 25 11:16:31 crc kubenswrapper[4769]: I1125 11:16:31.703573 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-657bc78fc7-ccwr8_5aee4062-7d9d-44f0-a07c-6f0704946803/proxy-httpd/0.log" Nov 25 11:16:31 crc kubenswrapper[4769]: I1125 11:16:31.776574 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-mq88x_6e5767ed-9248-4df0-945f-817154feb33c/swift-ring-rebalance/0.log" Nov 25 11:16:31 crc kubenswrapper[4769]: I1125 11:16:31.856816 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_f7987cb4-5485-438f-bc01-c69e509b81a6/account-auditor/0.log" Nov 25 11:16:31 crc kubenswrapper[4769]: I1125 11:16:31.954402 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_f7987cb4-5485-438f-bc01-c69e509b81a6/account-reaper/0.log" Nov 25 11:16:31 crc kubenswrapper[4769]: I1125 11:16:31.992158 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_f7987cb4-5485-438f-bc01-c69e509b81a6/account-replicator/0.log" Nov 25 11:16:32 crc kubenswrapper[4769]: I1125 11:16:32.014258 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_f7987cb4-5485-438f-bc01-c69e509b81a6/account-server/0.log" Nov 25 11:16:32 crc kubenswrapper[4769]: I1125 11:16:32.071363 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_f7987cb4-5485-438f-bc01-c69e509b81a6/container-auditor/0.log" Nov 25 11:16:32 crc kubenswrapper[4769]: I1125 11:16:32.129089 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_f7987cb4-5485-438f-bc01-c69e509b81a6/container-replicator/0.log" Nov 25 11:16:32 crc kubenswrapper[4769]: I1125 11:16:32.198662 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_f7987cb4-5485-438f-bc01-c69e509b81a6/container-updater/0.log" Nov 25 11:16:32 crc kubenswrapper[4769]: I1125 11:16:32.200198 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_f7987cb4-5485-438f-bc01-c69e509b81a6/container-server/0.log" Nov 25 11:16:32 crc kubenswrapper[4769]: I1125 11:16:32.273626 4769 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_swift-storage-0_f7987cb4-5485-438f-bc01-c69e509b81a6/object-auditor/0.log" Nov 25 11:16:32 crc kubenswrapper[4769]: I1125 11:16:32.306736 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_f7987cb4-5485-438f-bc01-c69e509b81a6/object-expirer/0.log" Nov 25 11:16:32 crc kubenswrapper[4769]: I1125 11:16:32.370625 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_f7987cb4-5485-438f-bc01-c69e509b81a6/object-replicator/0.log" Nov 25 11:16:32 crc kubenswrapper[4769]: I1125 11:16:32.421934 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_f7987cb4-5485-438f-bc01-c69e509b81a6/object-server/0.log" Nov 25 11:16:32 crc kubenswrapper[4769]: I1125 11:16:32.435616 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_f7987cb4-5485-438f-bc01-c69e509b81a6/object-updater/0.log" Nov 25 11:16:32 crc kubenswrapper[4769]: I1125 11:16:32.466258 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_f7987cb4-5485-438f-bc01-c69e509b81a6/rsync/0.log" Nov 25 11:16:32 crc kubenswrapper[4769]: I1125 11:16:32.512265 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_f7987cb4-5485-438f-bc01-c69e509b81a6/swift-recon-cron/0.log" Nov 25 11:16:32 crc kubenswrapper[4769]: I1125 11:16:32.673061 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-zsqjx_eef2221d-aa15-41d7-bb96-d2206eef00fb/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 11:16:32 crc kubenswrapper[4769]: I1125 11:16:32.731384 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-power-monitoring-edpm-deployment-openstack-edpm-n2pnw_116b0564-3e29-4358-a713-663b8f4a156a/telemetry-power-monitoring-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 11:16:33 crc kubenswrapper[4769]: I1125 11:16:33.477770 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-c8rg2_93dbc090-2167-43d9-bddd-d38e311a27ec/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 11:16:33 crc kubenswrapper[4769]: I1125 11:16:33.519456 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_138fc3df-1668-45d3-84d4-ccd0743bbcdf/test-operator-logs-container/0.log" Nov 25 11:16:33 crc kubenswrapper[4769]: I1125 11:16:33.575801 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_03f7a5db-5b5b-4107-ab58-1fc92bea67a1/tempest-tests-tempest-tests-runner/0.log" Nov 25 11:16:37 crc kubenswrapper[4769]: I1125 11:16:37.641026 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-2vxrf"] Nov 25 11:16:37 crc kubenswrapper[4769]: E1125 11:16:37.641833 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc574b28-b81a-4c11-a88d-2b6aa034922f" containerName="container-00" Nov 25 11:16:37 crc kubenswrapper[4769]: I1125 11:16:37.641846 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc574b28-b81a-4c11-a88d-2b6aa034922f" containerName="container-00" Nov 25 11:16:37 crc kubenswrapper[4769]: I1125 11:16:37.642172 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc574b28-b81a-4c11-a88d-2b6aa034922f" containerName="container-00" Nov 25 11:16:37 crc kubenswrapper[4769]: 
I1125 11:16:37.643844 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2vxrf" Nov 25 11:16:37 crc kubenswrapper[4769]: I1125 11:16:37.656803 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2vxrf"] Nov 25 11:16:37 crc kubenswrapper[4769]: I1125 11:16:37.675689 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0419c8b6-617f-4ad8-9135-16b0e708fa1d-catalog-content\") pod \"redhat-marketplace-2vxrf\" (UID: \"0419c8b6-617f-4ad8-9135-16b0e708fa1d\") " pod="openshift-marketplace/redhat-marketplace-2vxrf" Nov 25 11:16:37 crc kubenswrapper[4769]: I1125 11:16:37.675752 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0419c8b6-617f-4ad8-9135-16b0e708fa1d-utilities\") pod \"redhat-marketplace-2vxrf\" (UID: \"0419c8b6-617f-4ad8-9135-16b0e708fa1d\") " pod="openshift-marketplace/redhat-marketplace-2vxrf" Nov 25 11:16:37 crc kubenswrapper[4769]: I1125 11:16:37.675845 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mh5n6\" (UniqueName: \"kubernetes.io/projected/0419c8b6-617f-4ad8-9135-16b0e708fa1d-kube-api-access-mh5n6\") pod \"redhat-marketplace-2vxrf\" (UID: \"0419c8b6-617f-4ad8-9135-16b0e708fa1d\") " pod="openshift-marketplace/redhat-marketplace-2vxrf" Nov 25 11:16:37 crc kubenswrapper[4769]: I1125 11:16:37.777756 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mh5n6\" (UniqueName: \"kubernetes.io/projected/0419c8b6-617f-4ad8-9135-16b0e708fa1d-kube-api-access-mh5n6\") pod \"redhat-marketplace-2vxrf\" (UID: \"0419c8b6-617f-4ad8-9135-16b0e708fa1d\") " pod="openshift-marketplace/redhat-marketplace-2vxrf" Nov 25 11:16:37 crc kubenswrapper[4769]: I1125 11:16:37.777906 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0419c8b6-617f-4ad8-9135-16b0e708fa1d-catalog-content\") pod \"redhat-marketplace-2vxrf\" (UID: \"0419c8b6-617f-4ad8-9135-16b0e708fa1d\") " pod="openshift-marketplace/redhat-marketplace-2vxrf" Nov 25 11:16:37 crc kubenswrapper[4769]: I1125 11:16:37.777951 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0419c8b6-617f-4ad8-9135-16b0e708fa1d-utilities\") pod \"redhat-marketplace-2vxrf\" (UID: \"0419c8b6-617f-4ad8-9135-16b0e708fa1d\") " pod="openshift-marketplace/redhat-marketplace-2vxrf" Nov 25 11:16:37 crc kubenswrapper[4769]: I1125 11:16:37.778466 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0419c8b6-617f-4ad8-9135-16b0e708fa1d-utilities\") pod \"redhat-marketplace-2vxrf\" (UID: \"0419c8b6-617f-4ad8-9135-16b0e708fa1d\") " pod="openshift-marketplace/redhat-marketplace-2vxrf" Nov 25 11:16:37 crc kubenswrapper[4769]: I1125 11:16:37.778555 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0419c8b6-617f-4ad8-9135-16b0e708fa1d-catalog-content\") pod \"redhat-marketplace-2vxrf\" (UID: \"0419c8b6-617f-4ad8-9135-16b0e708fa1d\") " pod="openshift-marketplace/redhat-marketplace-2vxrf" Nov 25 11:16:37 crc kubenswrapper[4769]: 
I1125 11:16:37.798025 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mh5n6\" (UniqueName: \"kubernetes.io/projected/0419c8b6-617f-4ad8-9135-16b0e708fa1d-kube-api-access-mh5n6\") pod \"redhat-marketplace-2vxrf\" (UID: \"0419c8b6-617f-4ad8-9135-16b0e708fa1d\") " pod="openshift-marketplace/redhat-marketplace-2vxrf" Nov 25 11:16:37 crc kubenswrapper[4769]: I1125 11:16:37.974309 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2vxrf" Nov 25 11:16:38 crc kubenswrapper[4769]: I1125 11:16:38.516687 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2vxrf"] Nov 25 11:16:38 crc kubenswrapper[4769]: I1125 11:16:38.934648 4769 generic.go:334] "Generic (PLEG): container finished" podID="0419c8b6-617f-4ad8-9135-16b0e708fa1d" containerID="f9acfa4bef3e153fbb508abf1732aa1101f4900964d81be57271cab719c620d5" exitCode=0 Nov 25 11:16:38 crc kubenswrapper[4769]: I1125 11:16:38.934907 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2vxrf" event={"ID":"0419c8b6-617f-4ad8-9135-16b0e708fa1d","Type":"ContainerDied","Data":"f9acfa4bef3e153fbb508abf1732aa1101f4900964d81be57271cab719c620d5"} Nov 25 11:16:38 crc kubenswrapper[4769]: I1125 11:16:38.934989 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2vxrf" event={"ID":"0419c8b6-617f-4ad8-9135-16b0e708fa1d","Type":"ContainerStarted","Data":"6707d9109c714e7e947dd51db4ec545814fdbaa5c845d81902f2342fcac3ff9e"} Nov 25 11:16:38 crc kubenswrapper[4769]: I1125 11:16:38.938257 4769 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 11:16:40 crc kubenswrapper[4769]: I1125 11:16:40.963206 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2vxrf" event={"ID":"0419c8b6-617f-4ad8-9135-16b0e708fa1d","Type":"ContainerStarted","Data":"1c0c1f4217fdad46bf48f6f90a56e2d49c4e995ebfc4352763873caf4c51c378"} Nov 25 11:16:41 crc kubenswrapper[4769]: I1125 11:16:41.985290 4769 generic.go:334] "Generic (PLEG): container finished" podID="0419c8b6-617f-4ad8-9135-16b0e708fa1d" containerID="1c0c1f4217fdad46bf48f6f90a56e2d49c4e995ebfc4352763873caf4c51c378" exitCode=0 Nov 25 11:16:41 crc kubenswrapper[4769]: I1125 11:16:41.985367 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2vxrf" event={"ID":"0419c8b6-617f-4ad8-9135-16b0e708fa1d","Type":"ContainerDied","Data":"1c0c1f4217fdad46bf48f6f90a56e2d49c4e995ebfc4352763873caf4c51c378"} Nov 25 11:16:43 crc kubenswrapper[4769]: I1125 11:16:43.013835 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2vxrf" event={"ID":"0419c8b6-617f-4ad8-9135-16b0e708fa1d","Type":"ContainerStarted","Data":"c9d3557f0db8adf55ab8ea6da70470968e6768729fa82382c79bdb8e4c310055"} Nov 25 11:16:43 crc kubenswrapper[4769]: I1125 11:16:43.039505 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-2vxrf" podStartSLOduration=2.3952520010000002 podStartE2EDuration="6.039485513s" podCreationTimestamp="2025-11-25 11:16:37 +0000 UTC" firstStartedPulling="2025-11-25 11:16:38.936936879 +0000 UTC m=+5547.521909192" lastFinishedPulling="2025-11-25 11:16:42.581170381 +0000 UTC m=+5551.166142704" observedRunningTime="2025-11-25 11:16:43.02881863 +0000 UTC 
m=+5551.613790953" watchObservedRunningTime="2025-11-25 11:16:43.039485513 +0000 UTC m=+5551.624457826" Nov 25 11:16:47 crc kubenswrapper[4769]: I1125 11:16:47.975096 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-2vxrf" Nov 25 11:16:47 crc kubenswrapper[4769]: I1125 11:16:47.975491 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-2vxrf" Nov 25 11:16:48 crc kubenswrapper[4769]: I1125 11:16:48.039422 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-2vxrf" Nov 25 11:16:48 crc kubenswrapper[4769]: I1125 11:16:48.131924 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-2vxrf" Nov 25 11:16:48 crc kubenswrapper[4769]: I1125 11:16:48.294483 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-2vxrf"] Nov 25 11:16:50 crc kubenswrapper[4769]: I1125 11:16:50.105401 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-2vxrf" podUID="0419c8b6-617f-4ad8-9135-16b0e708fa1d" containerName="registry-server" containerID="cri-o://c9d3557f0db8adf55ab8ea6da70470968e6768729fa82382c79bdb8e4c310055" gracePeriod=2 Nov 25 11:16:50 crc kubenswrapper[4769]: I1125 11:16:50.646674 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2vxrf" Nov 25 11:16:50 crc kubenswrapper[4769]: I1125 11:16:50.713182 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mh5n6\" (UniqueName: \"kubernetes.io/projected/0419c8b6-617f-4ad8-9135-16b0e708fa1d-kube-api-access-mh5n6\") pod \"0419c8b6-617f-4ad8-9135-16b0e708fa1d\" (UID: \"0419c8b6-617f-4ad8-9135-16b0e708fa1d\") " Nov 25 11:16:50 crc kubenswrapper[4769]: I1125 11:16:50.713242 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0419c8b6-617f-4ad8-9135-16b0e708fa1d-utilities\") pod \"0419c8b6-617f-4ad8-9135-16b0e708fa1d\" (UID: \"0419c8b6-617f-4ad8-9135-16b0e708fa1d\") " Nov 25 11:16:50 crc kubenswrapper[4769]: I1125 11:16:50.713319 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0419c8b6-617f-4ad8-9135-16b0e708fa1d-catalog-content\") pod \"0419c8b6-617f-4ad8-9135-16b0e708fa1d\" (UID: \"0419c8b6-617f-4ad8-9135-16b0e708fa1d\") " Nov 25 11:16:50 crc kubenswrapper[4769]: I1125 11:16:50.714329 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0419c8b6-617f-4ad8-9135-16b0e708fa1d-utilities" (OuterVolumeSpecName: "utilities") pod "0419c8b6-617f-4ad8-9135-16b0e708fa1d" (UID: "0419c8b6-617f-4ad8-9135-16b0e708fa1d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:16:50 crc kubenswrapper[4769]: I1125 11:16:50.729812 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0419c8b6-617f-4ad8-9135-16b0e708fa1d-kube-api-access-mh5n6" (OuterVolumeSpecName: "kube-api-access-mh5n6") pod "0419c8b6-617f-4ad8-9135-16b0e708fa1d" (UID: "0419c8b6-617f-4ad8-9135-16b0e708fa1d"). InnerVolumeSpecName "kube-api-access-mh5n6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:16:50 crc kubenswrapper[4769]: I1125 11:16:50.736475 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0419c8b6-617f-4ad8-9135-16b0e708fa1d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0419c8b6-617f-4ad8-9135-16b0e708fa1d" (UID: "0419c8b6-617f-4ad8-9135-16b0e708fa1d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:16:50 crc kubenswrapper[4769]: I1125 11:16:50.816621 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mh5n6\" (UniqueName: \"kubernetes.io/projected/0419c8b6-617f-4ad8-9135-16b0e708fa1d-kube-api-access-mh5n6\") on node \"crc\" DevicePath \"\"" Nov 25 11:16:50 crc kubenswrapper[4769]: I1125 11:16:50.816661 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0419c8b6-617f-4ad8-9135-16b0e708fa1d-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 11:16:50 crc kubenswrapper[4769]: I1125 11:16:50.816671 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0419c8b6-617f-4ad8-9135-16b0e708fa1d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 11:16:51 crc kubenswrapper[4769]: I1125 11:16:51.129744 4769 generic.go:334] "Generic (PLEG): container finished" podID="0419c8b6-617f-4ad8-9135-16b0e708fa1d" containerID="c9d3557f0db8adf55ab8ea6da70470968e6768729fa82382c79bdb8e4c310055" exitCode=0 Nov 25 11:16:51 crc kubenswrapper[4769]: I1125 11:16:51.129800 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2vxrf" Nov 25 11:16:51 crc kubenswrapper[4769]: I1125 11:16:51.129798 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2vxrf" event={"ID":"0419c8b6-617f-4ad8-9135-16b0e708fa1d","Type":"ContainerDied","Data":"c9d3557f0db8adf55ab8ea6da70470968e6768729fa82382c79bdb8e4c310055"} Nov 25 11:16:51 crc kubenswrapper[4769]: I1125 11:16:51.129899 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2vxrf" event={"ID":"0419c8b6-617f-4ad8-9135-16b0e708fa1d","Type":"ContainerDied","Data":"6707d9109c714e7e947dd51db4ec545814fdbaa5c845d81902f2342fcac3ff9e"} Nov 25 11:16:51 crc kubenswrapper[4769]: I1125 11:16:51.129923 4769 scope.go:117] "RemoveContainer" containerID="c9d3557f0db8adf55ab8ea6da70470968e6768729fa82382c79bdb8e4c310055" Nov 25 11:16:51 crc kubenswrapper[4769]: I1125 11:16:51.166738 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-2vxrf"] Nov 25 11:16:51 crc kubenswrapper[4769]: I1125 11:16:51.171896 4769 scope.go:117] "RemoveContainer" containerID="1c0c1f4217fdad46bf48f6f90a56e2d49c4e995ebfc4352763873caf4c51c378" Nov 25 11:16:51 crc kubenswrapper[4769]: I1125 11:16:51.181828 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-2vxrf"] Nov 25 11:16:51 crc kubenswrapper[4769]: I1125 11:16:51.192296 4769 scope.go:117] "RemoveContainer" containerID="f9acfa4bef3e153fbb508abf1732aa1101f4900964d81be57271cab719c620d5" Nov 25 11:16:51 crc kubenswrapper[4769]: I1125 11:16:51.262106 4769 scope.go:117] "RemoveContainer" containerID="c9d3557f0db8adf55ab8ea6da70470968e6768729fa82382c79bdb8e4c310055" Nov 25 11:16:51 crc kubenswrapper[4769]: E1125 11:16:51.262599 4769 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c9d3557f0db8adf55ab8ea6da70470968e6768729fa82382c79bdb8e4c310055\": container with ID starting with c9d3557f0db8adf55ab8ea6da70470968e6768729fa82382c79bdb8e4c310055 not found: ID does not exist" containerID="c9d3557f0db8adf55ab8ea6da70470968e6768729fa82382c79bdb8e4c310055" Nov 25 11:16:51 crc kubenswrapper[4769]: I1125 11:16:51.262643 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9d3557f0db8adf55ab8ea6da70470968e6768729fa82382c79bdb8e4c310055"} err="failed to get container status \"c9d3557f0db8adf55ab8ea6da70470968e6768729fa82382c79bdb8e4c310055\": rpc error: code = NotFound desc = could not find container \"c9d3557f0db8adf55ab8ea6da70470968e6768729fa82382c79bdb8e4c310055\": container with ID starting with c9d3557f0db8adf55ab8ea6da70470968e6768729fa82382c79bdb8e4c310055 not found: ID does not exist" Nov 25 11:16:51 crc kubenswrapper[4769]: I1125 11:16:51.262675 4769 scope.go:117] "RemoveContainer" containerID="1c0c1f4217fdad46bf48f6f90a56e2d49c4e995ebfc4352763873caf4c51c378" Nov 25 11:16:51 crc kubenswrapper[4769]: E1125 11:16:51.263059 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1c0c1f4217fdad46bf48f6f90a56e2d49c4e995ebfc4352763873caf4c51c378\": container with ID starting with 1c0c1f4217fdad46bf48f6f90a56e2d49c4e995ebfc4352763873caf4c51c378 not found: ID does not exist" containerID="1c0c1f4217fdad46bf48f6f90a56e2d49c4e995ebfc4352763873caf4c51c378" Nov 25 11:16:51 crc kubenswrapper[4769]: I1125 11:16:51.263089 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c0c1f4217fdad46bf48f6f90a56e2d49c4e995ebfc4352763873caf4c51c378"} err="failed to get container status \"1c0c1f4217fdad46bf48f6f90a56e2d49c4e995ebfc4352763873caf4c51c378\": rpc error: code = NotFound desc = could not find container \"1c0c1f4217fdad46bf48f6f90a56e2d49c4e995ebfc4352763873caf4c51c378\": container with ID starting with 1c0c1f4217fdad46bf48f6f90a56e2d49c4e995ebfc4352763873caf4c51c378 not found: ID does not exist" Nov 25 11:16:51 crc kubenswrapper[4769]: I1125 11:16:51.263104 4769 scope.go:117] "RemoveContainer" containerID="f9acfa4bef3e153fbb508abf1732aa1101f4900964d81be57271cab719c620d5" Nov 25 11:16:51 crc kubenswrapper[4769]: E1125 11:16:51.263283 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f9acfa4bef3e153fbb508abf1732aa1101f4900964d81be57271cab719c620d5\": container with ID starting with f9acfa4bef3e153fbb508abf1732aa1101f4900964d81be57271cab719c620d5 not found: ID does not exist" containerID="f9acfa4bef3e153fbb508abf1732aa1101f4900964d81be57271cab719c620d5" Nov 25 11:16:51 crc kubenswrapper[4769]: I1125 11:16:51.263300 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f9acfa4bef3e153fbb508abf1732aa1101f4900964d81be57271cab719c620d5"} err="failed to get container status \"f9acfa4bef3e153fbb508abf1732aa1101f4900964d81be57271cab719c620d5\": rpc error: code = NotFound desc = could not find container \"f9acfa4bef3e153fbb508abf1732aa1101f4900964d81be57271cab719c620d5\": container with ID starting with f9acfa4bef3e153fbb508abf1732aa1101f4900964d81be57271cab719c620d5 not found: ID does not exist" Nov 25 11:16:52 crc kubenswrapper[4769]: I1125 11:16:52.256215 4769 kubelet_volumes.go:163] "Cleaned 
up orphaned pod volumes dir" podUID="0419c8b6-617f-4ad8-9135-16b0e708fa1d" path="/var/lib/kubelet/pods/0419c8b6-617f-4ad8-9135-16b0e708fa1d/volumes" Nov 25 11:16:52 crc kubenswrapper[4769]: I1125 11:16:52.290325 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 11:16:52 crc kubenswrapper[4769]: I1125 11:16:52.290680 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 11:17:00 crc kubenswrapper[4769]: I1125 11:17:00.578540 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_737bed4169c94da5e324fc8d9e35ae60aa44caf94ad8b2e26d4bf5d2436g8xs_3069ed26-1a81-4909-b213-2b01fa737dd3/util/0.log" Nov 25 11:17:00 crc kubenswrapper[4769]: I1125 11:17:00.742739 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_737bed4169c94da5e324fc8d9e35ae60aa44caf94ad8b2e26d4bf5d2436g8xs_3069ed26-1a81-4909-b213-2b01fa737dd3/util/0.log" Nov 25 11:17:00 crc kubenswrapper[4769]: I1125 11:17:00.808951 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_737bed4169c94da5e324fc8d9e35ae60aa44caf94ad8b2e26d4bf5d2436g8xs_3069ed26-1a81-4909-b213-2b01fa737dd3/pull/0.log" Nov 25 11:17:00 crc kubenswrapper[4769]: I1125 11:17:00.822520 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_737bed4169c94da5e324fc8d9e35ae60aa44caf94ad8b2e26d4bf5d2436g8xs_3069ed26-1a81-4909-b213-2b01fa737dd3/pull/0.log" Nov 25 11:17:01 crc kubenswrapper[4769]: I1125 11:17:01.008580 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_737bed4169c94da5e324fc8d9e35ae60aa44caf94ad8b2e26d4bf5d2436g8xs_3069ed26-1a81-4909-b213-2b01fa737dd3/util/0.log" Nov 25 11:17:01 crc kubenswrapper[4769]: I1125 11:17:01.031159 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_737bed4169c94da5e324fc8d9e35ae60aa44caf94ad8b2e26d4bf5d2436g8xs_3069ed26-1a81-4909-b213-2b01fa737dd3/extract/0.log" Nov 25 11:17:01 crc kubenswrapper[4769]: I1125 11:17:01.049636 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_737bed4169c94da5e324fc8d9e35ae60aa44caf94ad8b2e26d4bf5d2436g8xs_3069ed26-1a81-4909-b213-2b01fa737dd3/pull/0.log" Nov 25 11:17:01 crc kubenswrapper[4769]: I1125 11:17:01.191174 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-86dc4d89c8-zv5xr_b90cc789-8211-48bc-85cc-1a31ad1af486/kube-rbac-proxy/0.log" Nov 25 11:17:01 crc kubenswrapper[4769]: I1125 11:17:01.221255 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-86dc4d89c8-zv5xr_b90cc789-8211-48bc-85cc-1a31ad1af486/manager/3.log" Nov 25 11:17:01 crc kubenswrapper[4769]: I1125 11:17:01.244934 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-86dc4d89c8-zv5xr_b90cc789-8211-48bc-85cc-1a31ad1af486/manager/2.log" Nov 25 11:17:01 crc kubenswrapper[4769]: I1125 11:17:01.429949 4769 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-79856dc55c-2hp9n_aaa65e3e-75e2-4f50-b9d6-aa9710a6e394/kube-rbac-proxy/0.log" Nov 25 11:17:01 crc kubenswrapper[4769]: I1125 11:17:01.439507 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-79856dc55c-2hp9n_aaa65e3e-75e2-4f50-b9d6-aa9710a6e394/manager/3.log" Nov 25 11:17:01 crc kubenswrapper[4769]: I1125 11:17:01.441927 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-79856dc55c-2hp9n_aaa65e3e-75e2-4f50-b9d6-aa9710a6e394/manager/2.log" Nov 25 11:17:01 crc kubenswrapper[4769]: I1125 11:17:01.612382 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-7d695c9b56-7xcjk_0f8a8be0-04e6-4ef8-b3ce-fb6ced595cbd/manager/3.log" Nov 25 11:17:01 crc kubenswrapper[4769]: I1125 11:17:01.631860 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-7d695c9b56-7xcjk_0f8a8be0-04e6-4ef8-b3ce-fb6ced595cbd/manager/2.log" Nov 25 11:17:01 crc kubenswrapper[4769]: I1125 11:17:01.657209 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-7d695c9b56-7xcjk_0f8a8be0-04e6-4ef8-b3ce-fb6ced595cbd/kube-rbac-proxy/0.log" Nov 25 11:17:01 crc kubenswrapper[4769]: I1125 11:17:01.804105 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-68b95954c9-k2htr_4894deb0-65ca-4b42-b397-4092a75739c9/kube-rbac-proxy/0.log" Nov 25 11:17:01 crc kubenswrapper[4769]: I1125 11:17:01.821337 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-68b95954c9-k2htr_4894deb0-65ca-4b42-b397-4092a75739c9/manager/3.log" Nov 25 11:17:01 crc kubenswrapper[4769]: I1125 11:17:01.850605 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-68b95954c9-k2htr_4894deb0-65ca-4b42-b397-4092a75739c9/manager/2.log" Nov 25 11:17:01 crc kubenswrapper[4769]: I1125 11:17:01.975597 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-774b86978c-w8ftq_67abea47-5e8a-43a2-8865-929cfdfc607c/kube-rbac-proxy/0.log" Nov 25 11:17:02 crc kubenswrapper[4769]: I1125 11:17:02.006922 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-774b86978c-w8ftq_67abea47-5e8a-43a2-8865-929cfdfc607c/manager/3.log" Nov 25 11:17:02 crc kubenswrapper[4769]: I1125 11:17:02.035564 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-774b86978c-w8ftq_67abea47-5e8a-43a2-8865-929cfdfc607c/manager/2.log" Nov 25 11:17:02 crc kubenswrapper[4769]: I1125 11:17:02.215006 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c9694994-m2qxd_1da948d8-e834-488a-a3ec-a0c0229ebaf5/kube-rbac-proxy/0.log" Nov 25 11:17:02 crc kubenswrapper[4769]: I1125 11:17:02.246872 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c9694994-m2qxd_1da948d8-e834-488a-a3ec-a0c0229ebaf5/manager/2.log" Nov 25 11:17:02 crc kubenswrapper[4769]: I1125 11:17:02.247706 
4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c9694994-m2qxd_1da948d8-e834-488a-a3ec-a0c0229ebaf5/manager/3.log" Nov 25 11:17:02 crc kubenswrapper[4769]: I1125 11:17:02.399334 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-d5cc86f4b-nktn6_136f8f90-5673-4a08-ab4b-c030c1c428a6/kube-rbac-proxy/0.log" Nov 25 11:17:02 crc kubenswrapper[4769]: I1125 11:17:02.449631 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-d5cc86f4b-nktn6_136f8f90-5673-4a08-ab4b-c030c1c428a6/manager/3.log" Nov 25 11:17:02 crc kubenswrapper[4769]: I1125 11:17:02.454364 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-d5cc86f4b-nktn6_136f8f90-5673-4a08-ab4b-c030c1c428a6/manager/2.log" Nov 25 11:17:02 crc kubenswrapper[4769]: I1125 11:17:02.615047 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-5bfcdc958c-47gs9_b59cac8b-fb36-4316-ab83-da7202b67af5/kube-rbac-proxy/0.log" Nov 25 11:17:02 crc kubenswrapper[4769]: I1125 11:17:02.709053 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-5bfcdc958c-47gs9_b59cac8b-fb36-4316-ab83-da7202b67af5/manager/3.log" Nov 25 11:17:02 crc kubenswrapper[4769]: I1125 11:17:02.712060 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-5bfcdc958c-47gs9_b59cac8b-fb36-4316-ab83-da7202b67af5/manager/2.log" Nov 25 11:17:02 crc kubenswrapper[4769]: I1125 11:17:02.876852 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-748dc6576f-hh9nc_1f5735dc-67e5-423c-9a8f-d42977c892d3/kube-rbac-proxy/0.log" Nov 25 11:17:02 crc kubenswrapper[4769]: I1125 11:17:02.945641 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-748dc6576f-hh9nc_1f5735dc-67e5-423c-9a8f-d42977c892d3/manager/3.log" Nov 25 11:17:02 crc kubenswrapper[4769]: I1125 11:17:02.975524 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-748dc6576f-hh9nc_1f5735dc-67e5-423c-9a8f-d42977c892d3/manager/2.log" Nov 25 11:17:03 crc kubenswrapper[4769]: I1125 11:17:03.150086 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-58bb8d67cc-qdjsd_63e66921-47a7-407a-b50e-06cf5cadb8be/kube-rbac-proxy/0.log" Nov 25 11:17:03 crc kubenswrapper[4769]: I1125 11:17:03.234292 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-58bb8d67cc-qdjsd_63e66921-47a7-407a-b50e-06cf5cadb8be/manager/2.log" Nov 25 11:17:03 crc kubenswrapper[4769]: I1125 11:17:03.241612 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-58bb8d67cc-qdjsd_63e66921-47a7-407a-b50e-06cf5cadb8be/manager/3.log" Nov 25 11:17:03 crc kubenswrapper[4769]: I1125 11:17:03.446438 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-cb6c4fdb7-fknhz_9d0ef7c9-7421-4fe2-b1c8-551253bea174/manager/3.log" Nov 25 11:17:03 crc kubenswrapper[4769]: I1125 11:17:03.493401 4769 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-cb6c4fdb7-fknhz_9d0ef7c9-7421-4fe2-b1c8-551253bea174/manager/2.log" Nov 25 11:17:03 crc kubenswrapper[4769]: I1125 11:17:03.507319 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-cb6c4fdb7-fknhz_9d0ef7c9-7421-4fe2-b1c8-551253bea174/kube-rbac-proxy/0.log" Nov 25 11:17:03 crc kubenswrapper[4769]: I1125 11:17:03.711678 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-7c57c8bbc4-rgn2z_6d9be953-34ea-4956-96bf-84d5f8babb2d/kube-rbac-proxy/0.log" Nov 25 11:17:03 crc kubenswrapper[4769]: I1125 11:17:03.714637 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-7c57c8bbc4-rgn2z_6d9be953-34ea-4956-96bf-84d5f8babb2d/manager/3.log" Nov 25 11:17:03 crc kubenswrapper[4769]: I1125 11:17:03.735519 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-7c57c8bbc4-rgn2z_6d9be953-34ea-4956-96bf-84d5f8babb2d/manager/2.log" Nov 25 11:17:03 crc kubenswrapper[4769]: I1125 11:17:03.945690 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-rcv4n_1b8cd25d-43dd-4774-b1d9-59572bb6bef7/kube-rbac-proxy/0.log" Nov 25 11:17:03 crc kubenswrapper[4769]: I1125 11:17:03.947738 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-rcv4n_1b8cd25d-43dd-4774-b1d9-59572bb6bef7/manager/2.log" Nov 25 11:17:03 crc kubenswrapper[4769]: I1125 11:17:03.996239 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-rcv4n_1b8cd25d-43dd-4774-b1d9-59572bb6bef7/manager/3.log" Nov 25 11:17:04 crc kubenswrapper[4769]: I1125 11:17:04.148242 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-fd75fd47d-kbhzd_88cb8ad7-c855-45eb-a471-aacb8c42082c/manager/3.log" Nov 25 11:17:04 crc kubenswrapper[4769]: I1125 11:17:04.165015 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-fd75fd47d-kbhzd_88cb8ad7-c855-45eb-a471-aacb8c42082c/kube-rbac-proxy/0.log" Nov 25 11:17:04 crc kubenswrapper[4769]: I1125 11:17:04.304917 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-fd75fd47d-kbhzd_88cb8ad7-c855-45eb-a471-aacb8c42082c/manager/2.log" Nov 25 11:17:04 crc kubenswrapper[4769]: I1125 11:17:04.341771 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-544b9bb9-pxvh7_169b77e8-e7b0-4d11-9915-2442f48d9347/kube-rbac-proxy/0.log" Nov 25 11:17:04 crc kubenswrapper[4769]: I1125 11:17:04.365542 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-544b9bb9-pxvh7_169b77e8-e7b0-4d11-9915-2442f48d9347/manager/1.log" Nov 25 11:17:04 crc kubenswrapper[4769]: I1125 11:17:04.450750 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-544b9bb9-pxvh7_169b77e8-e7b0-4d11-9915-2442f48d9347/manager/0.log" Nov 25 11:17:04 crc 
kubenswrapper[4769]: I1125 11:17:04.565704 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-6c6f9bd7cc-cp48g_70f81d0a-db58-4bd4-a0e2-ee1c03e2f923/manager/1.log" Nov 25 11:17:04 crc kubenswrapper[4769]: I1125 11:17:04.585413 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-6c6f9bd7cc-cp48g_70f81d0a-db58-4bd4-a0e2-ee1c03e2f923/manager/2.log" Nov 25 11:17:04 crc kubenswrapper[4769]: I1125 11:17:04.722193 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-7fb4d7987d-w6ztr_6520a852-60ef-47d1-800b-633eae1655dd/operator/1.log" Nov 25 11:17:04 crc kubenswrapper[4769]: I1125 11:17:04.785120 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-qln84_eeb84a4b-8771-40e8-842b-9a67b1044074/registry-server/0.log" Nov 25 11:17:04 crc kubenswrapper[4769]: I1125 11:17:04.922849 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-66cf5c67ff-68bvf_a2f1ad69-27e4-4131-a742-a8d2c5df8636/kube-rbac-proxy/0.log" Nov 25 11:17:04 crc kubenswrapper[4769]: I1125 11:17:04.967270 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-7fb4d7987d-w6ztr_6520a852-60ef-47d1-800b-633eae1655dd/operator/0.log" Nov 25 11:17:05 crc kubenswrapper[4769]: I1125 11:17:05.001391 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-66cf5c67ff-68bvf_a2f1ad69-27e4-4131-a742-a8d2c5df8636/manager/2.log" Nov 25 11:17:05 crc kubenswrapper[4769]: I1125 11:17:05.006056 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-66cf5c67ff-68bvf_a2f1ad69-27e4-4131-a742-a8d2c5df8636/manager/3.log" Nov 25 11:17:05 crc kubenswrapper[4769]: I1125 11:17:05.143563 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5db546f9d9-g8xqz_1be22f03-8697-413b-922c-9344185c05c4/kube-rbac-proxy/0.log" Nov 25 11:17:05 crc kubenswrapper[4769]: I1125 11:17:05.147539 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5db546f9d9-g8xqz_1be22f03-8697-413b-922c-9344185c05c4/manager/3.log" Nov 25 11:17:05 crc kubenswrapper[4769]: I1125 11:17:05.182712 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5db546f9d9-g8xqz_1be22f03-8697-413b-922c-9344185c05c4/manager/2.log" Nov 25 11:17:05 crc kubenswrapper[4769]: I1125 11:17:05.306176 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-bzrbr_a032414e-4be2-47f7-ac88-3bdec0ccb151/operator/3.log" Nov 25 11:17:05 crc kubenswrapper[4769]: I1125 11:17:05.380918 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-bzrbr_a032414e-4be2-47f7-ac88-3bdec0ccb151/operator/2.log" Nov 25 11:17:05 crc kubenswrapper[4769]: I1125 11:17:05.381697 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-6fdc4fcf86-wssdn_18e1910e-52b2-439b-a93f-4ffe63a7b992/kube-rbac-proxy/0.log" Nov 25 11:17:05 crc 
kubenswrapper[4769]: I1125 11:17:05.427390 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-6fdc4fcf86-wssdn_18e1910e-52b2-439b-a93f-4ffe63a7b992/manager/3.log" Nov 25 11:17:05 crc kubenswrapper[4769]: I1125 11:17:05.529109 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-6fdc4fcf86-wssdn_18e1910e-52b2-439b-a93f-4ffe63a7b992/manager/2.log" Nov 25 11:17:05 crc kubenswrapper[4769]: I1125 11:17:05.558446 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-54cf759cb9-dcqfc_c1d16c0c-cca9-4794-8a52-c8674d9a069e/kube-rbac-proxy/0.log" Nov 25 11:17:05 crc kubenswrapper[4769]: I1125 11:17:05.600738 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-54cf759cb9-dcqfc_c1d16c0c-cca9-4794-8a52-c8674d9a069e/manager/2.log" Nov 25 11:17:05 crc kubenswrapper[4769]: I1125 11:17:05.619792 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-54cf759cb9-dcqfc_c1d16c0c-cca9-4794-8a52-c8674d9a069e/manager/1.log" Nov 25 11:17:05 crc kubenswrapper[4769]: I1125 11:17:05.762952 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cb74df96-b25q9_c21f4ff5-86fa-44f0-993f-59189de57182/manager/1.log" Nov 25 11:17:05 crc kubenswrapper[4769]: I1125 11:17:05.785360 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cb74df96-b25q9_c21f4ff5-86fa-44f0-993f-59189de57182/kube-rbac-proxy/0.log" Nov 25 11:17:05 crc kubenswrapper[4769]: I1125 11:17:05.793257 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cb74df96-b25q9_c21f4ff5-86fa-44f0-993f-59189de57182/manager/0.log" Nov 25 11:17:05 crc kubenswrapper[4769]: I1125 11:17:05.935803 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-864885998-rz7fl_8e7436d0-2ff7-4a11-9ab8-74a91e56de4a/manager/3.log" Nov 25 11:17:05 crc kubenswrapper[4769]: I1125 11:17:05.941301 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-864885998-rz7fl_8e7436d0-2ff7-4a11-9ab8-74a91e56de4a/kube-rbac-proxy/0.log" Nov 25 11:17:05 crc kubenswrapper[4769]: I1125 11:17:05.982076 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-864885998-rz7fl_8e7436d0-2ff7-4a11-9ab8-74a91e56de4a/manager/2.log" Nov 25 11:17:22 crc kubenswrapper[4769]: I1125 11:17:22.290065 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 11:17:22 crc kubenswrapper[4769]: I1125 11:17:22.290662 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 11:17:24 crc kubenswrapper[4769]: 
I1125 11:17:24.699443 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-qz6sr_c3329bde-d97b-47a5-96dc-a033e3c4bc8c/control-plane-machine-set-operator/0.log" Nov 25 11:17:24 crc kubenswrapper[4769]: I1125 11:17:24.938200 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-twb6l_5351ce01-8233-4a92-b15d-befd4e57b0d2/machine-api-operator/0.log" Nov 25 11:17:24 crc kubenswrapper[4769]: I1125 11:17:24.938255 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-twb6l_5351ce01-8233-4a92-b15d-befd4e57b0d2/kube-rbac-proxy/0.log" Nov 25 11:17:37 crc kubenswrapper[4769]: I1125 11:17:37.058082 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-662dt_8002485a-0573-48c9-aeac-f2f5a05cb1ae/cert-manager-controller/0.log" Nov 25 11:17:37 crc kubenswrapper[4769]: I1125 11:17:37.090156 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-662dt_8002485a-0573-48c9-aeac-f2f5a05cb1ae/cert-manager-controller/1.log" Nov 25 11:17:37 crc kubenswrapper[4769]: I1125 11:17:37.249481 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-fwc8s_c4701907-0ccc-4866-bb98-6005539baa95/cert-manager-cainjector/1.log" Nov 25 11:17:37 crc kubenswrapper[4769]: I1125 11:17:37.271126 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-fwc8s_c4701907-0ccc-4866-bb98-6005539baa95/cert-manager-cainjector/2.log" Nov 25 11:17:37 crc kubenswrapper[4769]: I1125 11:17:37.395866 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-q4rr6_0c37859f-e681-4cfe-b687-967b67a62534/cert-manager-webhook/0.log" Nov 25 11:17:50 crc kubenswrapper[4769]: I1125 11:17:50.783773 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-5874bd7bc5-cfwf7_b3842e94-b875-49e0-9041-51a64f4c496d/nmstate-console-plugin/0.log" Nov 25 11:17:51 crc kubenswrapper[4769]: I1125 11:17:51.083760 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-9ctgr_1b7e501c-cb93-4182-a15f-70db4cb62704/nmstate-handler/0.log" Nov 25 11:17:51 crc kubenswrapper[4769]: I1125 11:17:51.130764 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-fvq2h_ee0bb8ab-c782-49d9-81eb-c2c0a6a8ce52/nmstate-metrics/0.log" Nov 25 11:17:51 crc kubenswrapper[4769]: I1125 11:17:51.176355 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-fvq2h_ee0bb8ab-c782-49d9-81eb-c2c0a6a8ce52/kube-rbac-proxy/0.log" Nov 25 11:17:51 crc kubenswrapper[4769]: I1125 11:17:51.329156 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-557fdffb88-fqxhf_6e6ef328-24f7-4e03-a863-e369abbf53e0/nmstate-operator/0.log" Nov 25 11:17:51 crc kubenswrapper[4769]: I1125 11:17:51.401764 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-6b89b748d8-zljjp_467be990-d3c4-447c-a994-9b7caf5c0b44/nmstate-webhook/0.log" Nov 25 11:17:52 crc kubenswrapper[4769]: I1125 11:17:52.290071 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon 
namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 11:17:52 crc kubenswrapper[4769]: I1125 11:17:52.290123 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 11:17:52 crc kubenswrapper[4769]: I1125 11:17:52.290176 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" Nov 25 11:17:52 crc kubenswrapper[4769]: I1125 11:17:52.290999 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"774bc57830d3ea61329fafb65ae2f3776de79f07090f20a76a66522b46be9570"} pod="openshift-machine-config-operator/machine-config-daemon-98mzt" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 11:17:52 crc kubenswrapper[4769]: I1125 11:17:52.291047 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" containerID="cri-o://774bc57830d3ea61329fafb65ae2f3776de79f07090f20a76a66522b46be9570" gracePeriod=600 Nov 25 11:17:52 crc kubenswrapper[4769]: I1125 11:17:52.880607 4769 generic.go:334] "Generic (PLEG): container finished" podID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerID="774bc57830d3ea61329fafb65ae2f3776de79f07090f20a76a66522b46be9570" exitCode=0 Nov 25 11:17:52 crc kubenswrapper[4769]: I1125 11:17:52.881115 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" event={"ID":"d58c71b5-5dc4-45c1-9b58-9740a35d2256","Type":"ContainerDied","Data":"774bc57830d3ea61329fafb65ae2f3776de79f07090f20a76a66522b46be9570"} Nov 25 11:17:52 crc kubenswrapper[4769]: I1125 11:17:52.881150 4769 scope.go:117] "RemoveContainer" containerID="4b939fc00d006c5c582da7cfcaed5ec1363b4453901fa7b7866cf6b4ff5b14f5" Nov 25 11:17:53 crc kubenswrapper[4769]: I1125 11:17:53.895874 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" event={"ID":"d58c71b5-5dc4-45c1-9b58-9740a35d2256","Type":"ContainerStarted","Data":"244f1becf8ebb9eafc0f61d6d968181865e3a7575ad5fa6d09cfbb219ff266b8"} Nov 25 11:18:05 crc kubenswrapper[4769]: I1125 11:18:05.155684 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-cbd48d4d7-8psr6_930d9174-b8de-465e-a1b7-b9aa7c498246/kube-rbac-proxy/0.log" Nov 25 11:18:05 crc kubenswrapper[4769]: I1125 11:18:05.174181 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-cbd48d4d7-8psr6_930d9174-b8de-465e-a1b7-b9aa7c498246/manager/1.log" Nov 25 11:18:05 crc kubenswrapper[4769]: I1125 11:18:05.418720 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-cbd48d4d7-8psr6_930d9174-b8de-465e-a1b7-b9aa7c498246/manager/0.log" Nov 25 11:18:21 crc kubenswrapper[4769]: I1125 
11:18:21.177450 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_cluster-logging-operator-ff9846bd-84wrq_fd5f2609-5b84-4be6-9c54-725cd7879630/cluster-logging-operator/0.log" Nov 25 11:18:21 crc kubenswrapper[4769]: I1125 11:18:21.376621 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_collector-f5b9l_c107b3e5-59bc-48d8-927b-565e46733679/collector/0.log" Nov 25 11:18:21 crc kubenswrapper[4769]: I1125 11:18:21.473010 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-compactor-0_a4405372-5512-4a21-9e58-569bdcd4389c/loki-compactor/0.log" Nov 25 11:18:21 crc kubenswrapper[4769]: I1125 11:18:21.566356 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-distributor-76cc67bf56-g29kd_163db859-0d28-48d8-b06a-f6a94e19479d/loki-distributor/0.log" Nov 25 11:18:21 crc kubenswrapper[4769]: I1125 11:18:21.709598 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-5bdd8fd454-8hzw5_daa191b3-4057-42fd-8c0c-d0aa065af77b/gateway/0.log" Nov 25 11:18:21 crc kubenswrapper[4769]: I1125 11:18:21.711360 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-5bdd8fd454-8hzw5_daa191b3-4057-42fd-8c0c-d0aa065af77b/opa/0.log" Nov 25 11:18:21 crc kubenswrapper[4769]: I1125 11:18:21.908684 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-5bdd8fd454-klwtc_f535e254-5602-4794-9f47-e9bb2c1454b2/gateway/0.log" Nov 25 11:18:21 crc kubenswrapper[4769]: I1125 11:18:21.923779 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-5bdd8fd454-klwtc_f535e254-5602-4794-9f47-e9bb2c1454b2/opa/0.log" Nov 25 11:18:21 crc kubenswrapper[4769]: I1125 11:18:21.963724 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-index-gateway-0_ba6e5673-c7e8-4242-b24e-85603e10e8ac/loki-index-gateway/0.log" Nov 25 11:18:22 crc kubenswrapper[4769]: I1125 11:18:22.196820 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-ingester-0_853f39bc-058d-47a2-82f1-827f485f11a5/loki-ingester/0.log" Nov 25 11:18:22 crc kubenswrapper[4769]: I1125 11:18:22.236165 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-querier-5895d59bb8-tbhrj_6091a51e-9c46-48cd-bb3a-ff1f3c9aa965/loki-querier/0.log" Nov 25 11:18:22 crc kubenswrapper[4769]: I1125 11:18:22.376228 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-query-frontend-84558f7c9f-vdrhz_8f616f7b-7627-4878-9279-6d12b8ac3bb7/loki-query-frontend/0.log" Nov 25 11:18:36 crc kubenswrapper[4769]: I1125 11:18:36.546239 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-8k9g6_d64c9e2e-43f2-4ee9-b377-c32f743c034d/kube-rbac-proxy/0.log" Nov 25 11:18:36 crc kubenswrapper[4769]: I1125 11:18:36.728810 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-8k9g6_d64c9e2e-43f2-4ee9-b377-c32f743c034d/controller/0.log" Nov 25 11:18:36 crc kubenswrapper[4769]: I1125 11:18:36.741719 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-6998585d5-v89x9_8df830ab-ab7a-49cd-b7d4-72d44c99cc4f/frr-k8s-webhook-server/0.log" Nov 25 11:18:36 crc kubenswrapper[4769]: I1125 11:18:36.904084 
4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zwpnn_aa14b8e2-159e-4850-8816-14bc635838ac/cp-frr-files/0.log" Nov 25 11:18:37 crc kubenswrapper[4769]: I1125 11:18:37.112022 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zwpnn_aa14b8e2-159e-4850-8816-14bc635838ac/cp-metrics/0.log" Nov 25 11:18:37 crc kubenswrapper[4769]: I1125 11:18:37.113347 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zwpnn_aa14b8e2-159e-4850-8816-14bc635838ac/cp-frr-files/0.log" Nov 25 11:18:37 crc kubenswrapper[4769]: I1125 11:18:37.116704 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zwpnn_aa14b8e2-159e-4850-8816-14bc635838ac/cp-reloader/0.log" Nov 25 11:18:37 crc kubenswrapper[4769]: I1125 11:18:37.155717 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zwpnn_aa14b8e2-159e-4850-8816-14bc635838ac/cp-reloader/0.log" Nov 25 11:18:37 crc kubenswrapper[4769]: I1125 11:18:37.308227 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zwpnn_aa14b8e2-159e-4850-8816-14bc635838ac/cp-frr-files/0.log" Nov 25 11:18:37 crc kubenswrapper[4769]: I1125 11:18:37.319240 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zwpnn_aa14b8e2-159e-4850-8816-14bc635838ac/cp-reloader/0.log" Nov 25 11:18:37 crc kubenswrapper[4769]: I1125 11:18:37.362411 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zwpnn_aa14b8e2-159e-4850-8816-14bc635838ac/cp-metrics/0.log" Nov 25 11:18:37 crc kubenswrapper[4769]: I1125 11:18:37.375093 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zwpnn_aa14b8e2-159e-4850-8816-14bc635838ac/cp-metrics/0.log" Nov 25 11:18:37 crc kubenswrapper[4769]: I1125 11:18:37.523536 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zwpnn_aa14b8e2-159e-4850-8816-14bc635838ac/cp-reloader/0.log" Nov 25 11:18:37 crc kubenswrapper[4769]: I1125 11:18:37.550526 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zwpnn_aa14b8e2-159e-4850-8816-14bc635838ac/cp-metrics/0.log" Nov 25 11:18:37 crc kubenswrapper[4769]: I1125 11:18:37.551618 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zwpnn_aa14b8e2-159e-4850-8816-14bc635838ac/cp-frr-files/0.log" Nov 25 11:18:37 crc kubenswrapper[4769]: I1125 11:18:37.559621 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zwpnn_aa14b8e2-159e-4850-8816-14bc635838ac/controller/0.log" Nov 25 11:18:37 crc kubenswrapper[4769]: I1125 11:18:37.725334 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zwpnn_aa14b8e2-159e-4850-8816-14bc635838ac/frr-metrics/0.log" Nov 25 11:18:37 crc kubenswrapper[4769]: I1125 11:18:37.780208 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zwpnn_aa14b8e2-159e-4850-8816-14bc635838ac/kube-rbac-proxy/0.log" Nov 25 11:18:37 crc kubenswrapper[4769]: I1125 11:18:37.836227 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zwpnn_aa14b8e2-159e-4850-8816-14bc635838ac/kube-rbac-proxy-frr/0.log" Nov 25 11:18:38 crc kubenswrapper[4769]: I1125 11:18:38.005121 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zwpnn_aa14b8e2-159e-4850-8816-14bc635838ac/reloader/0.log" Nov 
25 11:18:38 crc kubenswrapper[4769]: I1125 11:18:38.111283 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-64569bb78d-pzqdq_9441dbc7-716c-413e-b0ea-bf1ef05b1608/manager/3.log" Nov 25 11:18:38 crc kubenswrapper[4769]: I1125 11:18:38.236477 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-64569bb78d-pzqdq_9441dbc7-716c-413e-b0ea-bf1ef05b1608/manager/2.log" Nov 25 11:18:38 crc kubenswrapper[4769]: I1125 11:18:38.342175 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-55c9569f76-llhqr_395976a4-ddee-425d-994f-c913076f1710/webhook-server/0.log" Nov 25 11:18:38 crc kubenswrapper[4769]: I1125 11:18:38.556175 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-84t8t_ce9a407b-6a62-48d8-a15d-e08c1b09c3e3/kube-rbac-proxy/0.log" Nov 25 11:18:39 crc kubenswrapper[4769]: I1125 11:18:39.645798 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-84t8t_ce9a407b-6a62-48d8-a15d-e08c1b09c3e3/speaker/0.log" Nov 25 11:18:39 crc kubenswrapper[4769]: I1125 11:18:39.730445 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-zwpnn_aa14b8e2-159e-4850-8816-14bc635838ac/frr/0.log" Nov 25 11:18:52 crc kubenswrapper[4769]: I1125 11:18:52.622835 4769 scope.go:117] "RemoveContainer" containerID="97d3e2f5500f546826a966cbcf97eda09bab5c7837cf4b791ecb7db4bc235c6a" Nov 25 11:18:52 crc kubenswrapper[4769]: I1125 11:18:52.649307 4769 scope.go:117] "RemoveContainer" containerID="fc071f5e0723a066e50b815205af45d4795644cf612a0459af9b2cc2a7ceec74" Nov 25 11:18:52 crc kubenswrapper[4769]: I1125 11:18:52.650032 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb826l85_029b4e7c-2a08-4a65-8226-904ac6eb536e/util/0.log" Nov 25 11:18:52 crc kubenswrapper[4769]: I1125 11:18:52.671100 4769 scope.go:117] "RemoveContainer" containerID="c321d22a1ae6fa2b8d2be1a746c67da60ad4d81030707dc7e25df63a208f8830" Nov 25 11:18:52 crc kubenswrapper[4769]: I1125 11:18:52.698735 4769 scope.go:117] "RemoveContainer" containerID="7bbb8e9e5221e6b1e6bd2d68d5535139e6efe50a4d3ba16c44a019da0fb5c1db" Nov 25 11:18:52 crc kubenswrapper[4769]: I1125 11:18:52.861208 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb826l85_029b4e7c-2a08-4a65-8226-904ac6eb536e/util/0.log" Nov 25 11:18:52 crc kubenswrapper[4769]: I1125 11:18:52.862191 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb826l85_029b4e7c-2a08-4a65-8226-904ac6eb536e/pull/0.log" Nov 25 11:18:52 crc kubenswrapper[4769]: I1125 11:18:52.898587 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb826l85_029b4e7c-2a08-4a65-8226-904ac6eb536e/pull/0.log" Nov 25 11:18:53 crc kubenswrapper[4769]: I1125 11:18:53.093359 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb826l85_029b4e7c-2a08-4a65-8226-904ac6eb536e/extract/0.log" Nov 25 11:18:53 crc kubenswrapper[4769]: I1125 11:18:53.123020 4769 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb826l85_029b4e7c-2a08-4a65-8226-904ac6eb536e/pull/0.log" Nov 25 11:18:53 crc kubenswrapper[4769]: I1125 11:18:53.137794 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb826l85_029b4e7c-2a08-4a65-8226-904ac6eb536e/util/0.log" Nov 25 11:18:53 crc kubenswrapper[4769]: I1125 11:18:53.405401 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772efcvc8_5b4d5ade-9f98-49a6-a236-dada2b731c5a/util/0.log" Nov 25 11:18:53 crc kubenswrapper[4769]: I1125 11:18:53.605876 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772efcvc8_5b4d5ade-9f98-49a6-a236-dada2b731c5a/util/0.log" Nov 25 11:18:53 crc kubenswrapper[4769]: I1125 11:18:53.634684 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772efcvc8_5b4d5ade-9f98-49a6-a236-dada2b731c5a/pull/0.log" Nov 25 11:18:53 crc kubenswrapper[4769]: I1125 11:18:53.652702 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772efcvc8_5b4d5ade-9f98-49a6-a236-dada2b731c5a/pull/0.log" Nov 25 11:18:53 crc kubenswrapper[4769]: I1125 11:18:53.808574 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772efcvc8_5b4d5ade-9f98-49a6-a236-dada2b731c5a/util/0.log" Nov 25 11:18:53 crc kubenswrapper[4769]: I1125 11:18:53.843085 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772efcvc8_5b4d5ade-9f98-49a6-a236-dada2b731c5a/extract/0.log" Nov 25 11:18:53 crc kubenswrapper[4769]: I1125 11:18:53.849482 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772efcvc8_5b4d5ade-9f98-49a6-a236-dada2b731c5a/pull/0.log" Nov 25 11:18:53 crc kubenswrapper[4769]: I1125 11:18:53.973720 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210dfwqr_4c6c1e32-59b2-4376-950f-077aee09fff0/util/0.log" Nov 25 11:18:54 crc kubenswrapper[4769]: I1125 11:18:54.175479 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210dfwqr_4c6c1e32-59b2-4376-950f-077aee09fff0/pull/0.log" Nov 25 11:18:54 crc kubenswrapper[4769]: I1125 11:18:54.207198 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210dfwqr_4c6c1e32-59b2-4376-950f-077aee09fff0/util/0.log" Nov 25 11:18:54 crc kubenswrapper[4769]: I1125 11:18:54.230800 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210dfwqr_4c6c1e32-59b2-4376-950f-077aee09fff0/pull/0.log" Nov 25 11:18:54 crc kubenswrapper[4769]: I1125 11:18:54.359664 4769 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210dfwqr_4c6c1e32-59b2-4376-950f-077aee09fff0/util/0.log" Nov 25 11:18:54 crc kubenswrapper[4769]: I1125 11:18:54.392685 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210dfwqr_4c6c1e32-59b2-4376-950f-077aee09fff0/pull/0.log" Nov 25 11:18:54 crc kubenswrapper[4769]: I1125 11:18:54.408395 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210dfwqr_4c6c1e32-59b2-4376-950f-077aee09fff0/extract/0.log" Nov 25 11:18:54 crc kubenswrapper[4769]: I1125 11:18:54.550636 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fcfrjr_0d4ddb62-f09e-495e-bad9-dc38999cf28d/util/0.log" Nov 25 11:18:54 crc kubenswrapper[4769]: I1125 11:18:54.743770 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fcfrjr_0d4ddb62-f09e-495e-bad9-dc38999cf28d/pull/0.log" Nov 25 11:18:54 crc kubenswrapper[4769]: I1125 11:18:54.755563 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fcfrjr_0d4ddb62-f09e-495e-bad9-dc38999cf28d/util/0.log" Nov 25 11:18:54 crc kubenswrapper[4769]: I1125 11:18:54.771818 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fcfrjr_0d4ddb62-f09e-495e-bad9-dc38999cf28d/pull/0.log" Nov 25 11:18:54 crc kubenswrapper[4769]: I1125 11:18:54.946219 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fcfrjr_0d4ddb62-f09e-495e-bad9-dc38999cf28d/util/0.log" Nov 25 11:18:54 crc kubenswrapper[4769]: I1125 11:18:54.981450 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fcfrjr_0d4ddb62-f09e-495e-bad9-dc38999cf28d/pull/0.log" Nov 25 11:18:54 crc kubenswrapper[4769]: I1125 11:18:54.999481 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fcfrjr_0d4ddb62-f09e-495e-bad9-dc38999cf28d/extract/0.log" Nov 25 11:18:55 crc kubenswrapper[4769]: I1125 11:18:55.155678 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-lxhnd_79614043-7894-49fb-a36f-24544c3653e0/extract-utilities/0.log" Nov 25 11:18:55 crc kubenswrapper[4769]: I1125 11:18:55.340025 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-lxhnd_79614043-7894-49fb-a36f-24544c3653e0/extract-content/0.log" Nov 25 11:18:55 crc kubenswrapper[4769]: I1125 11:18:55.351319 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-lxhnd_79614043-7894-49fb-a36f-24544c3653e0/extract-utilities/0.log" Nov 25 11:18:55 crc kubenswrapper[4769]: I1125 11:18:55.379246 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-lxhnd_79614043-7894-49fb-a36f-24544c3653e0/extract-content/0.log" Nov 25 11:18:56 crc kubenswrapper[4769]: I1125 11:18:56.119910 4769 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-lxhnd_79614043-7894-49fb-a36f-24544c3653e0/extract-content/0.log" Nov 25 11:18:56 crc kubenswrapper[4769]: I1125 11:18:56.120507 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-lxhnd_79614043-7894-49fb-a36f-24544c3653e0/extract-utilities/0.log" Nov 25 11:18:56 crc kubenswrapper[4769]: I1125 11:18:56.334317 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-j7vxb_6b2b21af-4ed5-4c0a-bf71-5657ec5cf6e8/extract-utilities/0.log" Nov 25 11:18:56 crc kubenswrapper[4769]: I1125 11:18:56.530614 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-j7vxb_6b2b21af-4ed5-4c0a-bf71-5657ec5cf6e8/extract-utilities/0.log" Nov 25 11:18:56 crc kubenswrapper[4769]: I1125 11:18:56.614718 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-j7vxb_6b2b21af-4ed5-4c0a-bf71-5657ec5cf6e8/extract-content/0.log" Nov 25 11:18:56 crc kubenswrapper[4769]: I1125 11:18:56.635665 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-j7vxb_6b2b21af-4ed5-4c0a-bf71-5657ec5cf6e8/extract-content/0.log" Nov 25 11:18:56 crc kubenswrapper[4769]: I1125 11:18:56.785417 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-j7vxb_6b2b21af-4ed5-4c0a-bf71-5657ec5cf6e8/extract-utilities/0.log" Nov 25 11:18:56 crc kubenswrapper[4769]: I1125 11:18:56.833227 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-j7vxb_6b2b21af-4ed5-4c0a-bf71-5657ec5cf6e8/extract-content/0.log" Nov 25 11:18:57 crc kubenswrapper[4769]: I1125 11:18:57.098875 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6n9dwd_aec3e6b9-d42c-49a5-bbd2-e99a688bb029/util/0.log" Nov 25 11:18:57 crc kubenswrapper[4769]: I1125 11:18:57.185114 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-lxhnd_79614043-7894-49fb-a36f-24544c3653e0/registry-server/0.log" Nov 25 11:18:57 crc kubenswrapper[4769]: I1125 11:18:57.214453 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6n9dwd_aec3e6b9-d42c-49a5-bbd2-e99a688bb029/util/0.log" Nov 25 11:18:57 crc kubenswrapper[4769]: I1125 11:18:57.279278 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6n9dwd_aec3e6b9-d42c-49a5-bbd2-e99a688bb029/pull/0.log" Nov 25 11:18:57 crc kubenswrapper[4769]: I1125 11:18:57.332827 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6n9dwd_aec3e6b9-d42c-49a5-bbd2-e99a688bb029/pull/0.log" Nov 25 11:18:58 crc kubenswrapper[4769]: I1125 11:18:58.001557 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-j7vxb_6b2b21af-4ed5-4c0a-bf71-5657ec5cf6e8/registry-server/0.log" Nov 25 11:18:58 crc kubenswrapper[4769]: I1125 11:18:58.041065 4769 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6n9dwd_aec3e6b9-d42c-49a5-bbd2-e99a688bb029/pull/0.log" Nov 25 11:18:58 crc kubenswrapper[4769]: I1125 11:18:58.041894 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6n9dwd_aec3e6b9-d42c-49a5-bbd2-e99a688bb029/extract/0.log" Nov 25 11:18:58 crc kubenswrapper[4769]: I1125 11:18:58.054461 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6n9dwd_aec3e6b9-d42c-49a5-bbd2-e99a688bb029/util/0.log" Nov 25 11:18:58 crc kubenswrapper[4769]: I1125 11:18:58.185673 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-wn2f7_2ef0d2ad-687b-4157-8ab5-803122670e19/marketplace-operator/0.log" Nov 25 11:18:58 crc kubenswrapper[4769]: I1125 11:18:58.287944 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-5cw8t_63da627e-e321-47f1-9743-b13ed41ac4cb/extract-utilities/0.log" Nov 25 11:18:58 crc kubenswrapper[4769]: I1125 11:18:58.483201 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-5cw8t_63da627e-e321-47f1-9743-b13ed41ac4cb/extract-utilities/0.log" Nov 25 11:18:58 crc kubenswrapper[4769]: I1125 11:18:58.496077 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-5cw8t_63da627e-e321-47f1-9743-b13ed41ac4cb/extract-content/0.log" Nov 25 11:18:58 crc kubenswrapper[4769]: I1125 11:18:58.531798 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-5cw8t_63da627e-e321-47f1-9743-b13ed41ac4cb/extract-content/0.log" Nov 25 11:18:58 crc kubenswrapper[4769]: I1125 11:18:58.689952 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-5cw8t_63da627e-e321-47f1-9743-b13ed41ac4cb/extract-utilities/0.log" Nov 25 11:18:58 crc kubenswrapper[4769]: I1125 11:18:58.731738 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-5cw8t_63da627e-e321-47f1-9743-b13ed41ac4cb/extract-content/0.log" Nov 25 11:18:58 crc kubenswrapper[4769]: I1125 11:18:58.822661 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-426q9_eb9a18c2-02cc-4ee0-8a59-5fb6fbbc424e/extract-utilities/0.log" Nov 25 11:18:59 crc kubenswrapper[4769]: I1125 11:18:58.999643 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-426q9_eb9a18c2-02cc-4ee0-8a59-5fb6fbbc424e/extract-utilities/0.log" Nov 25 11:18:59 crc kubenswrapper[4769]: I1125 11:18:59.017669 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-426q9_eb9a18c2-02cc-4ee0-8a59-5fb6fbbc424e/extract-content/0.log" Nov 25 11:18:59 crc kubenswrapper[4769]: I1125 11:18:59.027721 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-5cw8t_63da627e-e321-47f1-9743-b13ed41ac4cb/registry-server/0.log" Nov 25 11:18:59 crc kubenswrapper[4769]: I1125 11:18:59.071570 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-426q9_eb9a18c2-02cc-4ee0-8a59-5fb6fbbc424e/extract-content/0.log" Nov 25 11:18:59 crc kubenswrapper[4769]: 
I1125 11:18:59.307477 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-426q9_eb9a18c2-02cc-4ee0-8a59-5fb6fbbc424e/extract-content/0.log" Nov 25 11:18:59 crc kubenswrapper[4769]: I1125 11:18:59.312489 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-426q9_eb9a18c2-02cc-4ee0-8a59-5fb6fbbc424e/extract-utilities/0.log" Nov 25 11:18:59 crc kubenswrapper[4769]: I1125 11:18:59.771497 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-426q9_eb9a18c2-02cc-4ee0-8a59-5fb6fbbc424e/registry-server/0.log" Nov 25 11:19:12 crc kubenswrapper[4769]: I1125 11:19:12.761673 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-668cf9dfbb-zsv66_0c1565cf-9a88-4b55-a8b0-3ad109f1ca33/prometheus-operator/0.log" Nov 25 11:19:12 crc kubenswrapper[4769]: I1125 11:19:12.876802 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-5848cbcdc4-p94lc_35d3c6d2-4aea-4dbf-9e0f-6fc9fe4933ff/prometheus-operator-admission-webhook/0.log" Nov 25 11:19:12 crc kubenswrapper[4769]: I1125 11:19:12.999131 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-5848cbcdc4-zl226_f5366bf6-8a99-4762-86f8-e0d0b94c19ed/prometheus-operator-admission-webhook/0.log" Nov 25 11:19:13 crc kubenswrapper[4769]: I1125 11:19:13.131665 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-d8bb48f5d-l8nzd_75c93e2f-793a-49b1-bd51-73adf5f2edaf/operator/0.log" Nov 25 11:19:13 crc kubenswrapper[4769]: I1125 11:19:13.209734 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-ui-dashboards-7d5fb4cbfb-wrf89_cdfffb8b-45ab-4bd4-a29a-b6ad363d9d59/observability-ui-dashboards/0.log" Nov 25 11:19:13 crc kubenswrapper[4769]: I1125 11:19:13.474857 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5446b9c989-qfmdd_9b8ab5a0-3150-4f13-a8a1-4f8859eb1195/perses-operator/0.log" Nov 25 11:19:29 crc kubenswrapper[4769]: I1125 11:19:29.370146 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-cbd48d4d7-8psr6_930d9174-b8de-465e-a1b7-b9aa7c498246/kube-rbac-proxy/0.log" Nov 25 11:19:29 crc kubenswrapper[4769]: I1125 11:19:29.445861 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-cbd48d4d7-8psr6_930d9174-b8de-465e-a1b7-b9aa7c498246/manager/1.log" Nov 25 11:19:29 crc kubenswrapper[4769]: I1125 11:19:29.520564 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-cbd48d4d7-8psr6_930d9174-b8de-465e-a1b7-b9aa7c498246/manager/0.log" Nov 25 11:19:41 crc kubenswrapper[4769]: E1125 11:19:41.208027 4769 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.201:46822->38.102.83.201:43143: write tcp 38.102.83.201:46822->38.102.83.201:43143: write: broken pipe Nov 25 11:19:52 crc kubenswrapper[4769]: I1125 11:19:52.290299 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 11:19:52 crc kubenswrapper[4769]: I1125 11:19:52.290808 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 11:20:22 crc kubenswrapper[4769]: I1125 11:20:22.290819 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 11:20:22 crc kubenswrapper[4769]: I1125 11:20:22.291459 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 11:20:23 crc kubenswrapper[4769]: I1125 11:20:23.045321 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-rtw8n"] Nov 25 11:20:23 crc kubenswrapper[4769]: E1125 11:20:23.045935 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0419c8b6-617f-4ad8-9135-16b0e708fa1d" containerName="extract-utilities" Nov 25 11:20:23 crc kubenswrapper[4769]: I1125 11:20:23.045953 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="0419c8b6-617f-4ad8-9135-16b0e708fa1d" containerName="extract-utilities" Nov 25 11:20:23 crc kubenswrapper[4769]: E1125 11:20:23.045989 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0419c8b6-617f-4ad8-9135-16b0e708fa1d" containerName="extract-content" Nov 25 11:20:23 crc kubenswrapper[4769]: I1125 11:20:23.045996 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="0419c8b6-617f-4ad8-9135-16b0e708fa1d" containerName="extract-content" Nov 25 11:20:23 crc kubenswrapper[4769]: E1125 11:20:23.046009 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0419c8b6-617f-4ad8-9135-16b0e708fa1d" containerName="registry-server" Nov 25 11:20:23 crc kubenswrapper[4769]: I1125 11:20:23.046015 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="0419c8b6-617f-4ad8-9135-16b0e708fa1d" containerName="registry-server" Nov 25 11:20:23 crc kubenswrapper[4769]: I1125 11:20:23.046260 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="0419c8b6-617f-4ad8-9135-16b0e708fa1d" containerName="registry-server" Nov 25 11:20:23 crc kubenswrapper[4769]: I1125 11:20:23.047991 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-rtw8n" Nov 25 11:20:23 crc kubenswrapper[4769]: I1125 11:20:23.061218 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rtw8n"] Nov 25 11:20:23 crc kubenswrapper[4769]: I1125 11:20:23.214197 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-prtkx\" (UniqueName: \"kubernetes.io/projected/46de752e-19bd-459c-9eb3-45e1522368c6-kube-api-access-prtkx\") pod \"community-operators-rtw8n\" (UID: \"46de752e-19bd-459c-9eb3-45e1522368c6\") " pod="openshift-marketplace/community-operators-rtw8n" Nov 25 11:20:23 crc kubenswrapper[4769]: I1125 11:20:23.214712 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/46de752e-19bd-459c-9eb3-45e1522368c6-utilities\") pod \"community-operators-rtw8n\" (UID: \"46de752e-19bd-459c-9eb3-45e1522368c6\") " pod="openshift-marketplace/community-operators-rtw8n" Nov 25 11:20:23 crc kubenswrapper[4769]: I1125 11:20:23.215217 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/46de752e-19bd-459c-9eb3-45e1522368c6-catalog-content\") pod \"community-operators-rtw8n\" (UID: \"46de752e-19bd-459c-9eb3-45e1522368c6\") " pod="openshift-marketplace/community-operators-rtw8n" Nov 25 11:20:23 crc kubenswrapper[4769]: I1125 11:20:23.317519 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-prtkx\" (UniqueName: \"kubernetes.io/projected/46de752e-19bd-459c-9eb3-45e1522368c6-kube-api-access-prtkx\") pod \"community-operators-rtw8n\" (UID: \"46de752e-19bd-459c-9eb3-45e1522368c6\") " pod="openshift-marketplace/community-operators-rtw8n" Nov 25 11:20:23 crc kubenswrapper[4769]: I1125 11:20:23.317675 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/46de752e-19bd-459c-9eb3-45e1522368c6-utilities\") pod \"community-operators-rtw8n\" (UID: \"46de752e-19bd-459c-9eb3-45e1522368c6\") " pod="openshift-marketplace/community-operators-rtw8n" Nov 25 11:20:23 crc kubenswrapper[4769]: I1125 11:20:23.317725 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/46de752e-19bd-459c-9eb3-45e1522368c6-catalog-content\") pod \"community-operators-rtw8n\" (UID: \"46de752e-19bd-459c-9eb3-45e1522368c6\") " pod="openshift-marketplace/community-operators-rtw8n" Nov 25 11:20:23 crc kubenswrapper[4769]: I1125 11:20:23.318228 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/46de752e-19bd-459c-9eb3-45e1522368c6-utilities\") pod \"community-operators-rtw8n\" (UID: \"46de752e-19bd-459c-9eb3-45e1522368c6\") " pod="openshift-marketplace/community-operators-rtw8n" Nov 25 11:20:23 crc kubenswrapper[4769]: I1125 11:20:23.319665 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/46de752e-19bd-459c-9eb3-45e1522368c6-catalog-content\") pod \"community-operators-rtw8n\" (UID: \"46de752e-19bd-459c-9eb3-45e1522368c6\") " pod="openshift-marketplace/community-operators-rtw8n" Nov 25 11:20:23 crc kubenswrapper[4769]: I1125 11:20:23.336684 4769 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-prtkx\" (UniqueName: \"kubernetes.io/projected/46de752e-19bd-459c-9eb3-45e1522368c6-kube-api-access-prtkx\") pod \"community-operators-rtw8n\" (UID: \"46de752e-19bd-459c-9eb3-45e1522368c6\") " pod="openshift-marketplace/community-operators-rtw8n" Nov 25 11:20:23 crc kubenswrapper[4769]: I1125 11:20:23.381184 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rtw8n" Nov 25 11:20:24 crc kubenswrapper[4769]: I1125 11:20:24.062989 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rtw8n"] Nov 25 11:20:24 crc kubenswrapper[4769]: I1125 11:20:24.738547 4769 generic.go:334] "Generic (PLEG): container finished" podID="46de752e-19bd-459c-9eb3-45e1522368c6" containerID="dff5f0967a13e4fce82f4f18d7285ff11d274ff12a5f63bbb2dbd9fba3e5142d" exitCode=0 Nov 25 11:20:24 crc kubenswrapper[4769]: I1125 11:20:24.739936 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rtw8n" event={"ID":"46de752e-19bd-459c-9eb3-45e1522368c6","Type":"ContainerDied","Data":"dff5f0967a13e4fce82f4f18d7285ff11d274ff12a5f63bbb2dbd9fba3e5142d"} Nov 25 11:20:24 crc kubenswrapper[4769]: I1125 11:20:24.740228 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rtw8n" event={"ID":"46de752e-19bd-459c-9eb3-45e1522368c6","Type":"ContainerStarted","Data":"bf3cc4e8dae143eebe60217554640d792b3baba4dc4edfc8008ae4ab9b264b23"} Nov 25 11:20:26 crc kubenswrapper[4769]: I1125 11:20:26.769726 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rtw8n" event={"ID":"46de752e-19bd-459c-9eb3-45e1522368c6","Type":"ContainerStarted","Data":"5f004a7dec50e74bef080a4e11cb4a44718e471d14a69998e7daceb238a6c7ee"} Nov 25 11:20:28 crc kubenswrapper[4769]: I1125 11:20:28.810765 4769 generic.go:334] "Generic (PLEG): container finished" podID="46de752e-19bd-459c-9eb3-45e1522368c6" containerID="5f004a7dec50e74bef080a4e11cb4a44718e471d14a69998e7daceb238a6c7ee" exitCode=0 Nov 25 11:20:28 crc kubenswrapper[4769]: I1125 11:20:28.810853 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rtw8n" event={"ID":"46de752e-19bd-459c-9eb3-45e1522368c6","Type":"ContainerDied","Data":"5f004a7dec50e74bef080a4e11cb4a44718e471d14a69998e7daceb238a6c7ee"} Nov 25 11:20:29 crc kubenswrapper[4769]: I1125 11:20:29.824286 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rtw8n" event={"ID":"46de752e-19bd-459c-9eb3-45e1522368c6","Type":"ContainerStarted","Data":"e352bc6b25b3c3ef7a117326aebe01e420c83fe2173718816e087282291ca731"} Nov 25 11:20:29 crc kubenswrapper[4769]: I1125 11:20:29.847783 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-rtw8n" podStartSLOduration=2.384612008 podStartE2EDuration="6.847764454s" podCreationTimestamp="2025-11-25 11:20:23 +0000 UTC" firstStartedPulling="2025-11-25 11:20:24.742319572 +0000 UTC m=+5773.327291885" lastFinishedPulling="2025-11-25 11:20:29.205472018 +0000 UTC m=+5777.790444331" observedRunningTime="2025-11-25 11:20:29.843039288 +0000 UTC m=+5778.428011611" watchObservedRunningTime="2025-11-25 11:20:29.847764454 +0000 UTC m=+5778.432736767" Nov 25 11:20:33 crc kubenswrapper[4769]: I1125 11:20:33.382038 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/community-operators-rtw8n" Nov 25 11:20:33 crc kubenswrapper[4769]: I1125 11:20:33.382657 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-rtw8n" Nov 25 11:20:33 crc kubenswrapper[4769]: I1125 11:20:33.441329 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-rtw8n" Nov 25 11:20:43 crc kubenswrapper[4769]: I1125 11:20:43.437092 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-rtw8n" Nov 25 11:20:43 crc kubenswrapper[4769]: I1125 11:20:43.504244 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rtw8n"] Nov 25 11:20:43 crc kubenswrapper[4769]: I1125 11:20:43.988514 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-rtw8n" podUID="46de752e-19bd-459c-9eb3-45e1522368c6" containerName="registry-server" containerID="cri-o://e352bc6b25b3c3ef7a117326aebe01e420c83fe2173718816e087282291ca731" gracePeriod=2 Nov 25 11:20:45 crc kubenswrapper[4769]: I1125 11:20:45.004400 4769 generic.go:334] "Generic (PLEG): container finished" podID="46de752e-19bd-459c-9eb3-45e1522368c6" containerID="e352bc6b25b3c3ef7a117326aebe01e420c83fe2173718816e087282291ca731" exitCode=0 Nov 25 11:20:45 crc kubenswrapper[4769]: I1125 11:20:45.004468 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rtw8n" event={"ID":"46de752e-19bd-459c-9eb3-45e1522368c6","Type":"ContainerDied","Data":"e352bc6b25b3c3ef7a117326aebe01e420c83fe2173718816e087282291ca731"} Nov 25 11:20:45 crc kubenswrapper[4769]: I1125 11:20:45.004756 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rtw8n" event={"ID":"46de752e-19bd-459c-9eb3-45e1522368c6","Type":"ContainerDied","Data":"bf3cc4e8dae143eebe60217554640d792b3baba4dc4edfc8008ae4ab9b264b23"} Nov 25 11:20:45 crc kubenswrapper[4769]: I1125 11:20:45.004777 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bf3cc4e8dae143eebe60217554640d792b3baba4dc4edfc8008ae4ab9b264b23" Nov 25 11:20:45 crc kubenswrapper[4769]: I1125 11:20:45.068837 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-rtw8n" Nov 25 11:20:45 crc kubenswrapper[4769]: I1125 11:20:45.175574 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/46de752e-19bd-459c-9eb3-45e1522368c6-catalog-content\") pod \"46de752e-19bd-459c-9eb3-45e1522368c6\" (UID: \"46de752e-19bd-459c-9eb3-45e1522368c6\") " Nov 25 11:20:45 crc kubenswrapper[4769]: I1125 11:20:45.175686 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/46de752e-19bd-459c-9eb3-45e1522368c6-utilities\") pod \"46de752e-19bd-459c-9eb3-45e1522368c6\" (UID: \"46de752e-19bd-459c-9eb3-45e1522368c6\") " Nov 25 11:20:45 crc kubenswrapper[4769]: I1125 11:20:45.175769 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-prtkx\" (UniqueName: \"kubernetes.io/projected/46de752e-19bd-459c-9eb3-45e1522368c6-kube-api-access-prtkx\") pod \"46de752e-19bd-459c-9eb3-45e1522368c6\" (UID: \"46de752e-19bd-459c-9eb3-45e1522368c6\") " Nov 25 11:20:45 crc kubenswrapper[4769]: I1125 11:20:45.177948 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/46de752e-19bd-459c-9eb3-45e1522368c6-utilities" (OuterVolumeSpecName: "utilities") pod "46de752e-19bd-459c-9eb3-45e1522368c6" (UID: "46de752e-19bd-459c-9eb3-45e1522368c6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:20:45 crc kubenswrapper[4769]: I1125 11:20:45.183072 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/46de752e-19bd-459c-9eb3-45e1522368c6-kube-api-access-prtkx" (OuterVolumeSpecName: "kube-api-access-prtkx") pod "46de752e-19bd-459c-9eb3-45e1522368c6" (UID: "46de752e-19bd-459c-9eb3-45e1522368c6"). InnerVolumeSpecName "kube-api-access-prtkx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:20:45 crc kubenswrapper[4769]: I1125 11:20:45.236598 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/46de752e-19bd-459c-9eb3-45e1522368c6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "46de752e-19bd-459c-9eb3-45e1522368c6" (UID: "46de752e-19bd-459c-9eb3-45e1522368c6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:20:45 crc kubenswrapper[4769]: I1125 11:20:45.279351 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/46de752e-19bd-459c-9eb3-45e1522368c6-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 11:20:45 crc kubenswrapper[4769]: I1125 11:20:45.279378 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/46de752e-19bd-459c-9eb3-45e1522368c6-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 11:20:45 crc kubenswrapper[4769]: I1125 11:20:45.279391 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-prtkx\" (UniqueName: \"kubernetes.io/projected/46de752e-19bd-459c-9eb3-45e1522368c6-kube-api-access-prtkx\") on node \"crc\" DevicePath \"\"" Nov 25 11:20:46 crc kubenswrapper[4769]: I1125 11:20:46.016231 4769 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-rtw8n" Nov 25 11:20:46 crc kubenswrapper[4769]: I1125 11:20:46.167854 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rtw8n"] Nov 25 11:20:46 crc kubenswrapper[4769]: I1125 11:20:46.179159 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-rtw8n"] Nov 25 11:20:46 crc kubenswrapper[4769]: I1125 11:20:46.254273 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="46de752e-19bd-459c-9eb3-45e1522368c6" path="/var/lib/kubelet/pods/46de752e-19bd-459c-9eb3-45e1522368c6/volumes" Nov 25 11:20:52 crc kubenswrapper[4769]: I1125 11:20:52.290273 4769 patch_prober.go:28] interesting pod/machine-config-daemon-98mzt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 11:20:52 crc kubenswrapper[4769]: I1125 11:20:52.290906 4769 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 11:20:52 crc kubenswrapper[4769]: I1125 11:20:52.290993 4769 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" Nov 25 11:20:52 crc kubenswrapper[4769]: I1125 11:20:52.292076 4769 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"244f1becf8ebb9eafc0f61d6d968181865e3a7575ad5fa6d09cfbb219ff266b8"} pod="openshift-machine-config-operator/machine-config-daemon-98mzt" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 11:20:52 crc kubenswrapper[4769]: I1125 11:20:52.292136 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerName="machine-config-daemon" containerID="cri-o://244f1becf8ebb9eafc0f61d6d968181865e3a7575ad5fa6d09cfbb219ff266b8" gracePeriod=600 Nov 25 11:20:52 crc kubenswrapper[4769]: E1125 11:20:52.445751 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 11:20:52 crc kubenswrapper[4769]: I1125 11:20:52.858109 4769 scope.go:117] "RemoveContainer" containerID="dea5af91b1133d945c8eb44a81e8869d8b26897882c39093eb5d7c561e5b486d" Nov 25 11:20:53 crc kubenswrapper[4769]: I1125 11:20:53.151933 4769 generic.go:334] "Generic (PLEG): container finished" podID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" containerID="244f1becf8ebb9eafc0f61d6d968181865e3a7575ad5fa6d09cfbb219ff266b8" exitCode=0 Nov 25 11:20:53 crc kubenswrapper[4769]: I1125 11:20:53.152018 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-98mzt" event={"ID":"d58c71b5-5dc4-45c1-9b58-9740a35d2256","Type":"ContainerDied","Data":"244f1becf8ebb9eafc0f61d6d968181865e3a7575ad5fa6d09cfbb219ff266b8"} Nov 25 11:20:53 crc kubenswrapper[4769]: I1125 11:20:53.152276 4769 scope.go:117] "RemoveContainer" containerID="774bc57830d3ea61329fafb65ae2f3776de79f07090f20a76a66522b46be9570" Nov 25 11:20:53 crc kubenswrapper[4769]: I1125 11:20:53.159526 4769 scope.go:117] "RemoveContainer" containerID="244f1becf8ebb9eafc0f61d6d968181865e3a7575ad5fa6d09cfbb219ff266b8" Nov 25 11:20:53 crc kubenswrapper[4769]: E1125 11:20:53.160099 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 11:21:06 crc kubenswrapper[4769]: I1125 11:21:06.240002 4769 scope.go:117] "RemoveContainer" containerID="244f1becf8ebb9eafc0f61d6d968181865e3a7575ad5fa6d09cfbb219ff266b8" Nov 25 11:21:06 crc kubenswrapper[4769]: E1125 11:21:06.241130 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 11:21:21 crc kubenswrapper[4769]: I1125 11:21:21.237581 4769 scope.go:117] "RemoveContainer" containerID="244f1becf8ebb9eafc0f61d6d968181865e3a7575ad5fa6d09cfbb219ff266b8" Nov 25 11:21:21 crc kubenswrapper[4769]: E1125 11:21:21.238409 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 11:21:27 crc kubenswrapper[4769]: I1125 11:21:27.576311 4769 generic.go:334] "Generic (PLEG): container finished" podID="947890df-6173-4295-985a-8039cc77cca4" containerID="1828efa278c8a5888cff7203f3536fd09d17575a78b495ad34407a53c659aaff" exitCode=0 Nov 25 11:21:27 crc kubenswrapper[4769]: I1125 11:21:27.576741 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pd2j4/must-gather-5nnmn" event={"ID":"947890df-6173-4295-985a-8039cc77cca4","Type":"ContainerDied","Data":"1828efa278c8a5888cff7203f3536fd09d17575a78b495ad34407a53c659aaff"} Nov 25 11:21:27 crc kubenswrapper[4769]: I1125 11:21:27.577755 4769 scope.go:117] "RemoveContainer" containerID="1828efa278c8a5888cff7203f3536fd09d17575a78b495ad34407a53c659aaff" Nov 25 11:21:28 crc kubenswrapper[4769]: I1125 11:21:28.117217 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-pd2j4_must-gather-5nnmn_947890df-6173-4295-985a-8039cc77cca4/gather/0.log" Nov 25 11:21:34 crc kubenswrapper[4769]: I1125 11:21:34.243243 4769 scope.go:117] "RemoveContainer" 
containerID="244f1becf8ebb9eafc0f61d6d968181865e3a7575ad5fa6d09cfbb219ff266b8" Nov 25 11:21:34 crc kubenswrapper[4769]: E1125 11:21:34.245133 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 11:21:36 crc kubenswrapper[4769]: I1125 11:21:36.646292 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-pd2j4/must-gather-5nnmn"] Nov 25 11:21:36 crc kubenswrapper[4769]: I1125 11:21:36.647141 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-pd2j4/must-gather-5nnmn" podUID="947890df-6173-4295-985a-8039cc77cca4" containerName="copy" containerID="cri-o://b92d50d366e551d74d95c4369d526dd50766ca69b7dc3430c23092d1c50dfef0" gracePeriod=2 Nov 25 11:21:36 crc kubenswrapper[4769]: I1125 11:21:36.657587 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-pd2j4/must-gather-5nnmn"] Nov 25 11:21:37 crc kubenswrapper[4769]: I1125 11:21:37.226350 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-pd2j4_must-gather-5nnmn_947890df-6173-4295-985a-8039cc77cca4/copy/0.log" Nov 25 11:21:37 crc kubenswrapper[4769]: I1125 11:21:37.230173 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pd2j4/must-gather-5nnmn" Nov 25 11:21:37 crc kubenswrapper[4769]: I1125 11:21:37.271346 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/947890df-6173-4295-985a-8039cc77cca4-must-gather-output\") pod \"947890df-6173-4295-985a-8039cc77cca4\" (UID: \"947890df-6173-4295-985a-8039cc77cca4\") " Nov 25 11:21:37 crc kubenswrapper[4769]: I1125 11:21:37.271490 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nvn7p\" (UniqueName: \"kubernetes.io/projected/947890df-6173-4295-985a-8039cc77cca4-kube-api-access-nvn7p\") pod \"947890df-6173-4295-985a-8039cc77cca4\" (UID: \"947890df-6173-4295-985a-8039cc77cca4\") " Nov 25 11:21:37 crc kubenswrapper[4769]: I1125 11:21:37.297247 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/947890df-6173-4295-985a-8039cc77cca4-kube-api-access-nvn7p" (OuterVolumeSpecName: "kube-api-access-nvn7p") pod "947890df-6173-4295-985a-8039cc77cca4" (UID: "947890df-6173-4295-985a-8039cc77cca4"). InnerVolumeSpecName "kube-api-access-nvn7p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:21:37 crc kubenswrapper[4769]: I1125 11:21:37.374819 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nvn7p\" (UniqueName: \"kubernetes.io/projected/947890df-6173-4295-985a-8039cc77cca4-kube-api-access-nvn7p\") on node \"crc\" DevicePath \"\"" Nov 25 11:21:37 crc kubenswrapper[4769]: I1125 11:21:37.524810 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/947890df-6173-4295-985a-8039cc77cca4-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "947890df-6173-4295-985a-8039cc77cca4" (UID: "947890df-6173-4295-985a-8039cc77cca4"). 
InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:21:37 crc kubenswrapper[4769]: I1125 11:21:37.578351 4769 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/947890df-6173-4295-985a-8039cc77cca4-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 25 11:21:37 crc kubenswrapper[4769]: I1125 11:21:37.692640 4769 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-pd2j4_must-gather-5nnmn_947890df-6173-4295-985a-8039cc77cca4/copy/0.log" Nov 25 11:21:37 crc kubenswrapper[4769]: I1125 11:21:37.698121 4769 generic.go:334] "Generic (PLEG): container finished" podID="947890df-6173-4295-985a-8039cc77cca4" containerID="b92d50d366e551d74d95c4369d526dd50766ca69b7dc3430c23092d1c50dfef0" exitCode=143 Nov 25 11:21:37 crc kubenswrapper[4769]: I1125 11:21:37.698158 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pd2j4/must-gather-5nnmn" Nov 25 11:21:37 crc kubenswrapper[4769]: I1125 11:21:37.698177 4769 scope.go:117] "RemoveContainer" containerID="b92d50d366e551d74d95c4369d526dd50766ca69b7dc3430c23092d1c50dfef0" Nov 25 11:21:37 crc kubenswrapper[4769]: I1125 11:21:37.725277 4769 scope.go:117] "RemoveContainer" containerID="1828efa278c8a5888cff7203f3536fd09d17575a78b495ad34407a53c659aaff" Nov 25 11:21:37 crc kubenswrapper[4769]: I1125 11:21:37.833259 4769 scope.go:117] "RemoveContainer" containerID="b92d50d366e551d74d95c4369d526dd50766ca69b7dc3430c23092d1c50dfef0" Nov 25 11:21:37 crc kubenswrapper[4769]: E1125 11:21:37.833621 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b92d50d366e551d74d95c4369d526dd50766ca69b7dc3430c23092d1c50dfef0\": container with ID starting with b92d50d366e551d74d95c4369d526dd50766ca69b7dc3430c23092d1c50dfef0 not found: ID does not exist" containerID="b92d50d366e551d74d95c4369d526dd50766ca69b7dc3430c23092d1c50dfef0" Nov 25 11:21:37 crc kubenswrapper[4769]: I1125 11:21:37.833656 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b92d50d366e551d74d95c4369d526dd50766ca69b7dc3430c23092d1c50dfef0"} err="failed to get container status \"b92d50d366e551d74d95c4369d526dd50766ca69b7dc3430c23092d1c50dfef0\": rpc error: code = NotFound desc = could not find container \"b92d50d366e551d74d95c4369d526dd50766ca69b7dc3430c23092d1c50dfef0\": container with ID starting with b92d50d366e551d74d95c4369d526dd50766ca69b7dc3430c23092d1c50dfef0 not found: ID does not exist" Nov 25 11:21:37 crc kubenswrapper[4769]: I1125 11:21:37.833695 4769 scope.go:117] "RemoveContainer" containerID="1828efa278c8a5888cff7203f3536fd09d17575a78b495ad34407a53c659aaff" Nov 25 11:21:37 crc kubenswrapper[4769]: E1125 11:21:37.834098 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1828efa278c8a5888cff7203f3536fd09d17575a78b495ad34407a53c659aaff\": container with ID starting with 1828efa278c8a5888cff7203f3536fd09d17575a78b495ad34407a53c659aaff not found: ID does not exist" containerID="1828efa278c8a5888cff7203f3536fd09d17575a78b495ad34407a53c659aaff" Nov 25 11:21:37 crc kubenswrapper[4769]: I1125 11:21:37.834155 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1828efa278c8a5888cff7203f3536fd09d17575a78b495ad34407a53c659aaff"} err="failed to get container 
status \"1828efa278c8a5888cff7203f3536fd09d17575a78b495ad34407a53c659aaff\": rpc error: code = NotFound desc = could not find container \"1828efa278c8a5888cff7203f3536fd09d17575a78b495ad34407a53c659aaff\": container with ID starting with 1828efa278c8a5888cff7203f3536fd09d17575a78b495ad34407a53c659aaff not found: ID does not exist" Nov 25 11:21:38 crc kubenswrapper[4769]: I1125 11:21:38.252953 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="947890df-6173-4295-985a-8039cc77cca4" path="/var/lib/kubelet/pods/947890df-6173-4295-985a-8039cc77cca4/volumes" Nov 25 11:21:46 crc kubenswrapper[4769]: I1125 11:21:46.238332 4769 scope.go:117] "RemoveContainer" containerID="244f1becf8ebb9eafc0f61d6d968181865e3a7575ad5fa6d09cfbb219ff266b8" Nov 25 11:21:46 crc kubenswrapper[4769]: E1125 11:21:46.239338 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 11:21:57 crc kubenswrapper[4769]: I1125 11:21:57.238090 4769 scope.go:117] "RemoveContainer" containerID="244f1becf8ebb9eafc0f61d6d968181865e3a7575ad5fa6d09cfbb219ff266b8" Nov 25 11:21:57 crc kubenswrapper[4769]: E1125 11:21:57.238948 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 11:21:58 crc kubenswrapper[4769]: I1125 11:21:58.439305 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-wbm85"] Nov 25 11:21:58 crc kubenswrapper[4769]: E1125 11:21:58.440335 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46de752e-19bd-459c-9eb3-45e1522368c6" containerName="extract-content" Nov 25 11:21:58 crc kubenswrapper[4769]: I1125 11:21:58.440353 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="46de752e-19bd-459c-9eb3-45e1522368c6" containerName="extract-content" Nov 25 11:21:58 crc kubenswrapper[4769]: E1125 11:21:58.440393 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46de752e-19bd-459c-9eb3-45e1522368c6" containerName="extract-utilities" Nov 25 11:21:58 crc kubenswrapper[4769]: I1125 11:21:58.440402 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="46de752e-19bd-459c-9eb3-45e1522368c6" containerName="extract-utilities" Nov 25 11:21:58 crc kubenswrapper[4769]: E1125 11:21:58.440430 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="947890df-6173-4295-985a-8039cc77cca4" containerName="gather" Nov 25 11:21:58 crc kubenswrapper[4769]: I1125 11:21:58.440438 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="947890df-6173-4295-985a-8039cc77cca4" containerName="gather" Nov 25 11:21:58 crc kubenswrapper[4769]: E1125 11:21:58.440448 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="947890df-6173-4295-985a-8039cc77cca4" containerName="copy" Nov 25 11:21:58 crc kubenswrapper[4769]: I1125 11:21:58.440456 4769 state_mem.go:107] 
"Deleted CPUSet assignment" podUID="947890df-6173-4295-985a-8039cc77cca4" containerName="copy" Nov 25 11:21:58 crc kubenswrapper[4769]: E1125 11:21:58.440473 4769 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46de752e-19bd-459c-9eb3-45e1522368c6" containerName="registry-server" Nov 25 11:21:58 crc kubenswrapper[4769]: I1125 11:21:58.440481 4769 state_mem.go:107] "Deleted CPUSet assignment" podUID="46de752e-19bd-459c-9eb3-45e1522368c6" containerName="registry-server" Nov 25 11:21:58 crc kubenswrapper[4769]: I1125 11:21:58.440851 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="947890df-6173-4295-985a-8039cc77cca4" containerName="gather" Nov 25 11:21:58 crc kubenswrapper[4769]: I1125 11:21:58.440870 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="46de752e-19bd-459c-9eb3-45e1522368c6" containerName="registry-server" Nov 25 11:21:58 crc kubenswrapper[4769]: I1125 11:21:58.440914 4769 memory_manager.go:354] "RemoveStaleState removing state" podUID="947890df-6173-4295-985a-8039cc77cca4" containerName="copy" Nov 25 11:21:58 crc kubenswrapper[4769]: I1125 11:21:58.443281 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wbm85" Nov 25 11:21:58 crc kubenswrapper[4769]: I1125 11:21:58.455612 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wbm85"] Nov 25 11:21:58 crc kubenswrapper[4769]: I1125 11:21:58.537163 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f0a815e-5cd1-408f-b849-bfcf30e019fa-catalog-content\") pod \"redhat-operators-wbm85\" (UID: \"1f0a815e-5cd1-408f-b849-bfcf30e019fa\") " pod="openshift-marketplace/redhat-operators-wbm85" Nov 25 11:21:58 crc kubenswrapper[4769]: I1125 11:21:58.537438 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f0a815e-5cd1-408f-b849-bfcf30e019fa-utilities\") pod \"redhat-operators-wbm85\" (UID: \"1f0a815e-5cd1-408f-b849-bfcf30e019fa\") " pod="openshift-marketplace/redhat-operators-wbm85" Nov 25 11:21:58 crc kubenswrapper[4769]: I1125 11:21:58.537467 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bs9s5\" (UniqueName: \"kubernetes.io/projected/1f0a815e-5cd1-408f-b849-bfcf30e019fa-kube-api-access-bs9s5\") pod \"redhat-operators-wbm85\" (UID: \"1f0a815e-5cd1-408f-b849-bfcf30e019fa\") " pod="openshift-marketplace/redhat-operators-wbm85" Nov 25 11:21:58 crc kubenswrapper[4769]: I1125 11:21:58.639558 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f0a815e-5cd1-408f-b849-bfcf30e019fa-utilities\") pod \"redhat-operators-wbm85\" (UID: \"1f0a815e-5cd1-408f-b849-bfcf30e019fa\") " pod="openshift-marketplace/redhat-operators-wbm85" Nov 25 11:21:58 crc kubenswrapper[4769]: I1125 11:21:58.639611 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bs9s5\" (UniqueName: \"kubernetes.io/projected/1f0a815e-5cd1-408f-b849-bfcf30e019fa-kube-api-access-bs9s5\") pod \"redhat-operators-wbm85\" (UID: \"1f0a815e-5cd1-408f-b849-bfcf30e019fa\") " pod="openshift-marketplace/redhat-operators-wbm85" Nov 25 11:21:58 crc kubenswrapper[4769]: I1125 11:21:58.639651 4769 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f0a815e-5cd1-408f-b849-bfcf30e019fa-catalog-content\") pod \"redhat-operators-wbm85\" (UID: \"1f0a815e-5cd1-408f-b849-bfcf30e019fa\") " pod="openshift-marketplace/redhat-operators-wbm85" Nov 25 11:21:58 crc kubenswrapper[4769]: I1125 11:21:58.640328 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f0a815e-5cd1-408f-b849-bfcf30e019fa-catalog-content\") pod \"redhat-operators-wbm85\" (UID: \"1f0a815e-5cd1-408f-b849-bfcf30e019fa\") " pod="openshift-marketplace/redhat-operators-wbm85" Nov 25 11:21:58 crc kubenswrapper[4769]: I1125 11:21:58.640539 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f0a815e-5cd1-408f-b849-bfcf30e019fa-utilities\") pod \"redhat-operators-wbm85\" (UID: \"1f0a815e-5cd1-408f-b849-bfcf30e019fa\") " pod="openshift-marketplace/redhat-operators-wbm85" Nov 25 11:21:58 crc kubenswrapper[4769]: I1125 11:21:58.669007 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bs9s5\" (UniqueName: \"kubernetes.io/projected/1f0a815e-5cd1-408f-b849-bfcf30e019fa-kube-api-access-bs9s5\") pod \"redhat-operators-wbm85\" (UID: \"1f0a815e-5cd1-408f-b849-bfcf30e019fa\") " pod="openshift-marketplace/redhat-operators-wbm85" Nov 25 11:21:58 crc kubenswrapper[4769]: I1125 11:21:58.767259 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wbm85" Nov 25 11:21:59 crc kubenswrapper[4769]: I1125 11:21:59.275895 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wbm85"] Nov 25 11:22:00 crc kubenswrapper[4769]: I1125 11:22:00.227734 4769 generic.go:334] "Generic (PLEG): container finished" podID="1f0a815e-5cd1-408f-b849-bfcf30e019fa" containerID="40a0b9419b69476b9365dca3269f8139d8be457a0afd00c12794cdcdd6b6b716" exitCode=0 Nov 25 11:22:00 crc kubenswrapper[4769]: I1125 11:22:00.227865 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wbm85" event={"ID":"1f0a815e-5cd1-408f-b849-bfcf30e019fa","Type":"ContainerDied","Data":"40a0b9419b69476b9365dca3269f8139d8be457a0afd00c12794cdcdd6b6b716"} Nov 25 11:22:00 crc kubenswrapper[4769]: I1125 11:22:00.228106 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wbm85" event={"ID":"1f0a815e-5cd1-408f-b849-bfcf30e019fa","Type":"ContainerStarted","Data":"55ee1f40c6b3f939c8fd003ffcc6757544bbd60363df6f0f38ec544ab0190a69"} Nov 25 11:22:00 crc kubenswrapper[4769]: I1125 11:22:00.231873 4769 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 11:22:01 crc kubenswrapper[4769]: I1125 11:22:01.243169 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wbm85" event={"ID":"1f0a815e-5cd1-408f-b849-bfcf30e019fa","Type":"ContainerStarted","Data":"9d6e66e323be0e02398d6fd8ca4b9fe515aaba19e99e5b0663155030f176271e"} Nov 25 11:22:07 crc kubenswrapper[4769]: I1125 11:22:07.229165 4769 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-zplh2"] Nov 25 11:22:07 crc kubenswrapper[4769]: I1125 11:22:07.232396 4769 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-zplh2" Nov 25 11:22:07 crc kubenswrapper[4769]: I1125 11:22:07.252603 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zplh2"] Nov 25 11:22:07 crc kubenswrapper[4769]: I1125 11:22:07.376010 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76c57b7e-7bd6-45fc-8c69-55a7a10f8a0d-utilities\") pod \"certified-operators-zplh2\" (UID: \"76c57b7e-7bd6-45fc-8c69-55a7a10f8a0d\") " pod="openshift-marketplace/certified-operators-zplh2" Nov 25 11:22:07 crc kubenswrapper[4769]: I1125 11:22:07.376258 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c49rc\" (UniqueName: \"kubernetes.io/projected/76c57b7e-7bd6-45fc-8c69-55a7a10f8a0d-kube-api-access-c49rc\") pod \"certified-operators-zplh2\" (UID: \"76c57b7e-7bd6-45fc-8c69-55a7a10f8a0d\") " pod="openshift-marketplace/certified-operators-zplh2" Nov 25 11:22:07 crc kubenswrapper[4769]: I1125 11:22:07.376703 4769 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76c57b7e-7bd6-45fc-8c69-55a7a10f8a0d-catalog-content\") pod \"certified-operators-zplh2\" (UID: \"76c57b7e-7bd6-45fc-8c69-55a7a10f8a0d\") " pod="openshift-marketplace/certified-operators-zplh2" Nov 25 11:22:07 crc kubenswrapper[4769]: I1125 11:22:07.479622 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76c57b7e-7bd6-45fc-8c69-55a7a10f8a0d-utilities\") pod \"certified-operators-zplh2\" (UID: \"76c57b7e-7bd6-45fc-8c69-55a7a10f8a0d\") " pod="openshift-marketplace/certified-operators-zplh2" Nov 25 11:22:07 crc kubenswrapper[4769]: I1125 11:22:07.479788 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c49rc\" (UniqueName: \"kubernetes.io/projected/76c57b7e-7bd6-45fc-8c69-55a7a10f8a0d-kube-api-access-c49rc\") pod \"certified-operators-zplh2\" (UID: \"76c57b7e-7bd6-45fc-8c69-55a7a10f8a0d\") " pod="openshift-marketplace/certified-operators-zplh2" Nov 25 11:22:07 crc kubenswrapper[4769]: I1125 11:22:07.479894 4769 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76c57b7e-7bd6-45fc-8c69-55a7a10f8a0d-catalog-content\") pod \"certified-operators-zplh2\" (UID: \"76c57b7e-7bd6-45fc-8c69-55a7a10f8a0d\") " pod="openshift-marketplace/certified-operators-zplh2" Nov 25 11:22:07 crc kubenswrapper[4769]: I1125 11:22:07.480228 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76c57b7e-7bd6-45fc-8c69-55a7a10f8a0d-utilities\") pod \"certified-operators-zplh2\" (UID: \"76c57b7e-7bd6-45fc-8c69-55a7a10f8a0d\") " pod="openshift-marketplace/certified-operators-zplh2" Nov 25 11:22:07 crc kubenswrapper[4769]: I1125 11:22:07.480545 4769 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76c57b7e-7bd6-45fc-8c69-55a7a10f8a0d-catalog-content\") pod \"certified-operators-zplh2\" (UID: \"76c57b7e-7bd6-45fc-8c69-55a7a10f8a0d\") " pod="openshift-marketplace/certified-operators-zplh2" Nov 25 11:22:07 crc kubenswrapper[4769]: I1125 11:22:07.511167 4769 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-c49rc\" (UniqueName: \"kubernetes.io/projected/76c57b7e-7bd6-45fc-8c69-55a7a10f8a0d-kube-api-access-c49rc\") pod \"certified-operators-zplh2\" (UID: \"76c57b7e-7bd6-45fc-8c69-55a7a10f8a0d\") " pod="openshift-marketplace/certified-operators-zplh2" Nov 25 11:22:07 crc kubenswrapper[4769]: I1125 11:22:07.550917 4769 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zplh2" Nov 25 11:22:08 crc kubenswrapper[4769]: I1125 11:22:08.218610 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zplh2"] Nov 25 11:22:08 crc kubenswrapper[4769]: I1125 11:22:08.332305 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zplh2" event={"ID":"76c57b7e-7bd6-45fc-8c69-55a7a10f8a0d","Type":"ContainerStarted","Data":"593c5260d1183bd5b292bcf0131af559751e47885295514138925ffef46b30d5"} Nov 25 11:22:09 crc kubenswrapper[4769]: I1125 11:22:09.344461 4769 generic.go:334] "Generic (PLEG): container finished" podID="76c57b7e-7bd6-45fc-8c69-55a7a10f8a0d" containerID="516dc4398627894d3a4e8bd535da7b4d4348720764f5e71ef69d7f4e0b7b5b50" exitCode=0 Nov 25 11:22:09 crc kubenswrapper[4769]: I1125 11:22:09.344557 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zplh2" event={"ID":"76c57b7e-7bd6-45fc-8c69-55a7a10f8a0d","Type":"ContainerDied","Data":"516dc4398627894d3a4e8bd535da7b4d4348720764f5e71ef69d7f4e0b7b5b50"} Nov 25 11:22:09 crc kubenswrapper[4769]: I1125 11:22:09.347682 4769 generic.go:334] "Generic (PLEG): container finished" podID="1f0a815e-5cd1-408f-b849-bfcf30e019fa" containerID="9d6e66e323be0e02398d6fd8ca4b9fe515aaba19e99e5b0663155030f176271e" exitCode=0 Nov 25 11:22:09 crc kubenswrapper[4769]: I1125 11:22:09.347821 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wbm85" event={"ID":"1f0a815e-5cd1-408f-b849-bfcf30e019fa","Type":"ContainerDied","Data":"9d6e66e323be0e02398d6fd8ca4b9fe515aaba19e99e5b0663155030f176271e"} Nov 25 11:22:10 crc kubenswrapper[4769]: I1125 11:22:10.365465 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wbm85" event={"ID":"1f0a815e-5cd1-408f-b849-bfcf30e019fa","Type":"ContainerStarted","Data":"8125845c77e1b40e0950bf568befc617ffb12911afb232fa0f571055f65b2ca4"} Nov 25 11:22:10 crc kubenswrapper[4769]: I1125 11:22:10.399888 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-wbm85" podStartSLOduration=2.7019811860000003 podStartE2EDuration="12.399864211s" podCreationTimestamp="2025-11-25 11:21:58 +0000 UTC" firstStartedPulling="2025-11-25 11:22:00.231594344 +0000 UTC m=+5868.816566657" lastFinishedPulling="2025-11-25 11:22:09.929477379 +0000 UTC m=+5878.514449682" observedRunningTime="2025-11-25 11:22:10.38925891 +0000 UTC m=+5878.974231243" watchObservedRunningTime="2025-11-25 11:22:10.399864211 +0000 UTC m=+5878.984836524" Nov 25 11:22:11 crc kubenswrapper[4769]: I1125 11:22:11.237629 4769 scope.go:117] "RemoveContainer" containerID="244f1becf8ebb9eafc0f61d6d968181865e3a7575ad5fa6d09cfbb219ff266b8" Nov 25 11:22:11 crc kubenswrapper[4769]: E1125 11:22:11.238068 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 11:22:15 crc kubenswrapper[4769]: I1125 11:22:15.435719 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zplh2" event={"ID":"76c57b7e-7bd6-45fc-8c69-55a7a10f8a0d","Type":"ContainerStarted","Data":"98ffed088a7dc6c9be73dcb6cc8b7350a6dec3a764b56d430ec7af358ae2f5cf"} Nov 25 11:22:16 crc kubenswrapper[4769]: E1125 11:22:16.196477 4769 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod76c57b7e_7bd6_45fc_8c69_55a7a10f8a0d.slice/crio-conmon-98ffed088a7dc6c9be73dcb6cc8b7350a6dec3a764b56d430ec7af358ae2f5cf.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod76c57b7e_7bd6_45fc_8c69_55a7a10f8a0d.slice/crio-98ffed088a7dc6c9be73dcb6cc8b7350a6dec3a764b56d430ec7af358ae2f5cf.scope\": RecentStats: unable to find data in memory cache]" Nov 25 11:22:16 crc kubenswrapper[4769]: I1125 11:22:16.451040 4769 generic.go:334] "Generic (PLEG): container finished" podID="76c57b7e-7bd6-45fc-8c69-55a7a10f8a0d" containerID="98ffed088a7dc6c9be73dcb6cc8b7350a6dec3a764b56d430ec7af358ae2f5cf" exitCode=0 Nov 25 11:22:16 crc kubenswrapper[4769]: I1125 11:22:16.451115 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zplh2" event={"ID":"76c57b7e-7bd6-45fc-8c69-55a7a10f8a0d","Type":"ContainerDied","Data":"98ffed088a7dc6c9be73dcb6cc8b7350a6dec3a764b56d430ec7af358ae2f5cf"} Nov 25 11:22:18 crc kubenswrapper[4769]: I1125 11:22:18.489804 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zplh2" event={"ID":"76c57b7e-7bd6-45fc-8c69-55a7a10f8a0d","Type":"ContainerStarted","Data":"5053588bac78439892c8b0dfeae968e8c59bcaa30ade350d906dc01ce12ae211"} Nov 25 11:22:18 crc kubenswrapper[4769]: I1125 11:22:18.767673 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-wbm85" Nov 25 11:22:18 crc kubenswrapper[4769]: I1125 11:22:18.768079 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-wbm85" Nov 25 11:22:19 crc kubenswrapper[4769]: I1125 11:22:19.820507 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-wbm85" podUID="1f0a815e-5cd1-408f-b849-bfcf30e019fa" containerName="registry-server" probeResult="failure" output=< Nov 25 11:22:19 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:22:19 crc kubenswrapper[4769]: > Nov 25 11:22:26 crc kubenswrapper[4769]: I1125 11:22:26.237718 4769 scope.go:117] "RemoveContainer" containerID="244f1becf8ebb9eafc0f61d6d968181865e3a7575ad5fa6d09cfbb219ff266b8" Nov 25 11:22:26 crc kubenswrapper[4769]: E1125 11:22:26.238463 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" 
podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 11:22:27 crc kubenswrapper[4769]: I1125 11:22:27.552142 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-zplh2" Nov 25 11:22:27 crc kubenswrapper[4769]: I1125 11:22:27.552521 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-zplh2" Nov 25 11:22:29 crc kubenswrapper[4769]: I1125 11:22:29.025639 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-zplh2" podUID="76c57b7e-7bd6-45fc-8c69-55a7a10f8a0d" containerName="registry-server" probeResult="failure" output=< Nov 25 11:22:29 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:22:29 crc kubenswrapper[4769]: > Nov 25 11:22:29 crc kubenswrapper[4769]: I1125 11:22:29.822071 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-wbm85" podUID="1f0a815e-5cd1-408f-b849-bfcf30e019fa" containerName="registry-server" probeResult="failure" output=< Nov 25 11:22:29 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:22:29 crc kubenswrapper[4769]: > Nov 25 11:22:37 crc kubenswrapper[4769]: I1125 11:22:37.733625 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-zplh2" Nov 25 11:22:37 crc kubenswrapper[4769]: I1125 11:22:37.759064 4769 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-zplh2" podStartSLOduration=22.910712817 podStartE2EDuration="30.759039393s" podCreationTimestamp="2025-11-25 11:22:07 +0000 UTC" firstStartedPulling="2025-11-25 11:22:09.346659849 +0000 UTC m=+5877.931632162" lastFinishedPulling="2025-11-25 11:22:17.194986425 +0000 UTC m=+5885.779958738" observedRunningTime="2025-11-25 11:22:18.512289923 +0000 UTC m=+5887.097262236" watchObservedRunningTime="2025-11-25 11:22:37.759039393 +0000 UTC m=+5906.344011716" Nov 25 11:22:37 crc kubenswrapper[4769]: I1125 11:22:37.791771 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-zplh2" Nov 25 11:22:38 crc kubenswrapper[4769]: I1125 11:22:38.237169 4769 scope.go:117] "RemoveContainer" containerID="244f1becf8ebb9eafc0f61d6d968181865e3a7575ad5fa6d09cfbb219ff266b8" Nov 25 11:22:38 crc kubenswrapper[4769]: E1125 11:22:38.237654 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 11:22:39 crc kubenswrapper[4769]: I1125 11:22:39.828506 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-wbm85" podUID="1f0a815e-5cd1-408f-b849-bfcf30e019fa" containerName="registry-server" probeResult="failure" output=< Nov 25 11:22:39 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:22:39 crc kubenswrapper[4769]: > Nov 25 11:22:40 crc kubenswrapper[4769]: I1125 11:22:40.685386 4769 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zplh2"] Nov 25 
11:22:40 crc kubenswrapper[4769]: I1125 11:22:40.840735 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lxhnd"] Nov 25 11:22:40 crc kubenswrapper[4769]: I1125 11:22:40.841061 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-lxhnd" podUID="79614043-7894-49fb-a36f-24544c3653e0" containerName="registry-server" containerID="cri-o://833d6f4e2efddb7522b9a67a71044615891ff00707d2303eb0a7b3b9028a22f4" gracePeriod=2 Nov 25 11:22:42 crc kubenswrapper[4769]: I1125 11:22:41.369951 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lxhnd" Nov 25 11:22:42 crc kubenswrapper[4769]: I1125 11:22:41.454486 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79614043-7894-49fb-a36f-24544c3653e0-utilities\") pod \"79614043-7894-49fb-a36f-24544c3653e0\" (UID: \"79614043-7894-49fb-a36f-24544c3653e0\") " Nov 25 11:22:42 crc kubenswrapper[4769]: I1125 11:22:41.454689 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pf6ft\" (UniqueName: \"kubernetes.io/projected/79614043-7894-49fb-a36f-24544c3653e0-kube-api-access-pf6ft\") pod \"79614043-7894-49fb-a36f-24544c3653e0\" (UID: \"79614043-7894-49fb-a36f-24544c3653e0\") " Nov 25 11:22:42 crc kubenswrapper[4769]: I1125 11:22:41.454858 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79614043-7894-49fb-a36f-24544c3653e0-catalog-content\") pod \"79614043-7894-49fb-a36f-24544c3653e0\" (UID: \"79614043-7894-49fb-a36f-24544c3653e0\") " Nov 25 11:22:42 crc kubenswrapper[4769]: I1125 11:22:41.460529 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/79614043-7894-49fb-a36f-24544c3653e0-utilities" (OuterVolumeSpecName: "utilities") pod "79614043-7894-49fb-a36f-24544c3653e0" (UID: "79614043-7894-49fb-a36f-24544c3653e0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:22:42 crc kubenswrapper[4769]: I1125 11:22:41.508430 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79614043-7894-49fb-a36f-24544c3653e0-kube-api-access-pf6ft" (OuterVolumeSpecName: "kube-api-access-pf6ft") pod "79614043-7894-49fb-a36f-24544c3653e0" (UID: "79614043-7894-49fb-a36f-24544c3653e0"). InnerVolumeSpecName "kube-api-access-pf6ft". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:22:42 crc kubenswrapper[4769]: I1125 11:22:41.570924 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pf6ft\" (UniqueName: \"kubernetes.io/projected/79614043-7894-49fb-a36f-24544c3653e0-kube-api-access-pf6ft\") on node \"crc\" DevicePath \"\"" Nov 25 11:22:42 crc kubenswrapper[4769]: I1125 11:22:41.570966 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79614043-7894-49fb-a36f-24544c3653e0-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 11:22:42 crc kubenswrapper[4769]: I1125 11:22:41.575383 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/79614043-7894-49fb-a36f-24544c3653e0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "79614043-7894-49fb-a36f-24544c3653e0" (UID: "79614043-7894-49fb-a36f-24544c3653e0"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:22:42 crc kubenswrapper[4769]: I1125 11:22:41.673619 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79614043-7894-49fb-a36f-24544c3653e0-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 11:22:42 crc kubenswrapper[4769]: I1125 11:22:41.781881 4769 generic.go:334] "Generic (PLEG): container finished" podID="79614043-7894-49fb-a36f-24544c3653e0" containerID="833d6f4e2efddb7522b9a67a71044615891ff00707d2303eb0a7b3b9028a22f4" exitCode=0 Nov 25 11:22:42 crc kubenswrapper[4769]: I1125 11:22:41.781929 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lxhnd" event={"ID":"79614043-7894-49fb-a36f-24544c3653e0","Type":"ContainerDied","Data":"833d6f4e2efddb7522b9a67a71044615891ff00707d2303eb0a7b3b9028a22f4"} Nov 25 11:22:42 crc kubenswrapper[4769]: I1125 11:22:41.781958 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lxhnd" event={"ID":"79614043-7894-49fb-a36f-24544c3653e0","Type":"ContainerDied","Data":"513483fb908cf729e447a2ab280169e0751c86b2755f2d90adfe87ecc8601f5b"} Nov 25 11:22:42 crc kubenswrapper[4769]: I1125 11:22:41.781997 4769 scope.go:117] "RemoveContainer" containerID="833d6f4e2efddb7522b9a67a71044615891ff00707d2303eb0a7b3b9028a22f4" Nov 25 11:22:42 crc kubenswrapper[4769]: I1125 11:22:41.781990 4769 util.go:48] "No ready sandbox for pod can be found. 
Nov 25 11:22:42 crc kubenswrapper[4769]: I1125 11:22:41.843161 4769 scope.go:117] "RemoveContainer" containerID="d73e2ad3fd3d64fdae2400fcc0e79a44e26ab44819a4d363d45320f02c683d92"
Nov 25 11:22:42 crc kubenswrapper[4769]: I1125 11:22:41.857372 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lxhnd"]
Nov 25 11:22:42 crc kubenswrapper[4769]: I1125 11:22:41.871457 4769 scope.go:117] "RemoveContainer" containerID="08a93dd4eeef3d51c6f3b966e246771921a0ff219a356cc9cf4280c9bc933994"
Nov 25 11:22:42 crc kubenswrapper[4769]: I1125 11:22:41.871831 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-lxhnd"]
Nov 25 11:22:42 crc kubenswrapper[4769]: I1125 11:22:41.939826 4769 scope.go:117] "RemoveContainer" containerID="833d6f4e2efddb7522b9a67a71044615891ff00707d2303eb0a7b3b9028a22f4"
Nov 25 11:22:42 crc kubenswrapper[4769]: E1125 11:22:41.940523 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"833d6f4e2efddb7522b9a67a71044615891ff00707d2303eb0a7b3b9028a22f4\": container with ID starting with 833d6f4e2efddb7522b9a67a71044615891ff00707d2303eb0a7b3b9028a22f4 not found: ID does not exist" containerID="833d6f4e2efddb7522b9a67a71044615891ff00707d2303eb0a7b3b9028a22f4"
Nov 25 11:22:42 crc kubenswrapper[4769]: I1125 11:22:41.940580 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"833d6f4e2efddb7522b9a67a71044615891ff00707d2303eb0a7b3b9028a22f4"} err="failed to get container status \"833d6f4e2efddb7522b9a67a71044615891ff00707d2303eb0a7b3b9028a22f4\": rpc error: code = NotFound desc = could not find container \"833d6f4e2efddb7522b9a67a71044615891ff00707d2303eb0a7b3b9028a22f4\": container with ID starting with 833d6f4e2efddb7522b9a67a71044615891ff00707d2303eb0a7b3b9028a22f4 not found: ID does not exist"
Nov 25 11:22:42 crc kubenswrapper[4769]: I1125 11:22:41.940616 4769 scope.go:117] "RemoveContainer" containerID="d73e2ad3fd3d64fdae2400fcc0e79a44e26ab44819a4d363d45320f02c683d92"
Nov 25 11:22:42 crc kubenswrapper[4769]: E1125 11:22:41.941091 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d73e2ad3fd3d64fdae2400fcc0e79a44e26ab44819a4d363d45320f02c683d92\": container with ID starting with d73e2ad3fd3d64fdae2400fcc0e79a44e26ab44819a4d363d45320f02c683d92 not found: ID does not exist" containerID="d73e2ad3fd3d64fdae2400fcc0e79a44e26ab44819a4d363d45320f02c683d92"
Nov 25 11:22:42 crc kubenswrapper[4769]: I1125 11:22:41.941123 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d73e2ad3fd3d64fdae2400fcc0e79a44e26ab44819a4d363d45320f02c683d92"} err="failed to get container status \"d73e2ad3fd3d64fdae2400fcc0e79a44e26ab44819a4d363d45320f02c683d92\": rpc error: code = NotFound desc = could not find container \"d73e2ad3fd3d64fdae2400fcc0e79a44e26ab44819a4d363d45320f02c683d92\": container with ID starting with d73e2ad3fd3d64fdae2400fcc0e79a44e26ab44819a4d363d45320f02c683d92 not found: ID does not exist"
Nov 25 11:22:42 crc kubenswrapper[4769]: I1125 11:22:41.941146 4769 scope.go:117] "RemoveContainer" containerID="08a93dd4eeef3d51c6f3b966e246771921a0ff219a356cc9cf4280c9bc933994"
Nov 25 11:22:42 crc kubenswrapper[4769]: E1125 11:22:41.941385 4769 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"08a93dd4eeef3d51c6f3b966e246771921a0ff219a356cc9cf4280c9bc933994\": container with ID starting with 08a93dd4eeef3d51c6f3b966e246771921a0ff219a356cc9cf4280c9bc933994 not found: ID does not exist" containerID="08a93dd4eeef3d51c6f3b966e246771921a0ff219a356cc9cf4280c9bc933994"
failed" err="rpc error: code = NotFound desc = could not find container \"08a93dd4eeef3d51c6f3b966e246771921a0ff219a356cc9cf4280c9bc933994\": container with ID starting with 08a93dd4eeef3d51c6f3b966e246771921a0ff219a356cc9cf4280c9bc933994 not found: ID does not exist" containerID="08a93dd4eeef3d51c6f3b966e246771921a0ff219a356cc9cf4280c9bc933994" Nov 25 11:22:42 crc kubenswrapper[4769]: I1125 11:22:41.941404 4769 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"08a93dd4eeef3d51c6f3b966e246771921a0ff219a356cc9cf4280c9bc933994"} err="failed to get container status \"08a93dd4eeef3d51c6f3b966e246771921a0ff219a356cc9cf4280c9bc933994\": rpc error: code = NotFound desc = could not find container \"08a93dd4eeef3d51c6f3b966e246771921a0ff219a356cc9cf4280c9bc933994\": container with ID starting with 08a93dd4eeef3d51c6f3b966e246771921a0ff219a356cc9cf4280c9bc933994 not found: ID does not exist" Nov 25 11:22:42 crc kubenswrapper[4769]: I1125 11:22:42.252543 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="79614043-7894-49fb-a36f-24544c3653e0" path="/var/lib/kubelet/pods/79614043-7894-49fb-a36f-24544c3653e0/volumes" Nov 25 11:22:49 crc kubenswrapper[4769]: I1125 11:22:49.824433 4769 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-wbm85" podUID="1f0a815e-5cd1-408f-b849-bfcf30e019fa" containerName="registry-server" probeResult="failure" output=< Nov 25 11:22:49 crc kubenswrapper[4769]: timeout: failed to connect service ":50051" within 1s Nov 25 11:22:49 crc kubenswrapper[4769]: > Nov 25 11:22:50 crc kubenswrapper[4769]: I1125 11:22:50.238709 4769 scope.go:117] "RemoveContainer" containerID="244f1becf8ebb9eafc0f61d6d968181865e3a7575ad5fa6d09cfbb219ff266b8" Nov 25 11:22:50 crc kubenswrapper[4769]: E1125 11:22:50.239248 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 11:22:58 crc kubenswrapper[4769]: I1125 11:22:58.828534 4769 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-wbm85" Nov 25 11:22:58 crc kubenswrapper[4769]: I1125 11:22:58.898841 4769 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-wbm85" Nov 25 11:22:59 crc kubenswrapper[4769]: I1125 11:22:59.660625 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wbm85"] Nov 25 11:22:59 crc kubenswrapper[4769]: I1125 11:22:59.978904 4769 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-wbm85" podUID="1f0a815e-5cd1-408f-b849-bfcf30e019fa" containerName="registry-server" containerID="cri-o://8125845c77e1b40e0950bf568befc617ffb12911afb232fa0f571055f65b2ca4" gracePeriod=2 Nov 25 11:23:01 crc kubenswrapper[4769]: I1125 11:23:01.001882 4769 generic.go:334] "Generic (PLEG): container finished" podID="1f0a815e-5cd1-408f-b849-bfcf30e019fa" containerID="8125845c77e1b40e0950bf568befc617ffb12911afb232fa0f571055f65b2ca4" exitCode=0 Nov 25 11:23:01 crc kubenswrapper[4769]: I1125 11:23:01.001935 4769 kubelet.go:2453] "SyncLoop (PLEG): 
Nov 25 11:23:01 crc kubenswrapper[4769]: I1125 11:23:01.002879 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wbm85" event={"ID":"1f0a815e-5cd1-408f-b849-bfcf30e019fa","Type":"ContainerDied","Data":"55ee1f40c6b3f939c8fd003ffcc6757544bbd60363df6f0f38ec544ab0190a69"}
Nov 25 11:23:01 crc kubenswrapper[4769]: I1125 11:23:01.002897 4769 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="55ee1f40c6b3f939c8fd003ffcc6757544bbd60363df6f0f38ec544ab0190a69"
Nov 25 11:23:01 crc kubenswrapper[4769]: I1125 11:23:01.041323 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wbm85"
Nov 25 11:23:01 crc kubenswrapper[4769]: I1125 11:23:01.129059 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f0a815e-5cd1-408f-b849-bfcf30e019fa-utilities\") pod \"1f0a815e-5cd1-408f-b849-bfcf30e019fa\" (UID: \"1f0a815e-5cd1-408f-b849-bfcf30e019fa\") "
Nov 25 11:23:01 crc kubenswrapper[4769]: I1125 11:23:01.129361 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f0a815e-5cd1-408f-b849-bfcf30e019fa-catalog-content\") pod \"1f0a815e-5cd1-408f-b849-bfcf30e019fa\" (UID: \"1f0a815e-5cd1-408f-b849-bfcf30e019fa\") "
Nov 25 11:23:01 crc kubenswrapper[4769]: I1125 11:23:01.129468 4769 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bs9s5\" (UniqueName: \"kubernetes.io/projected/1f0a815e-5cd1-408f-b849-bfcf30e019fa-kube-api-access-bs9s5\") pod \"1f0a815e-5cd1-408f-b849-bfcf30e019fa\" (UID: \"1f0a815e-5cd1-408f-b849-bfcf30e019fa\") "
Nov 25 11:23:01 crc kubenswrapper[4769]: I1125 11:23:01.131030 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1f0a815e-5cd1-408f-b849-bfcf30e019fa-utilities" (OuterVolumeSpecName: "utilities") pod "1f0a815e-5cd1-408f-b849-bfcf30e019fa" (UID: "1f0a815e-5cd1-408f-b849-bfcf30e019fa"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 11:23:01 crc kubenswrapper[4769]: I1125 11:23:01.137108 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1f0a815e-5cd1-408f-b849-bfcf30e019fa-kube-api-access-bs9s5" (OuterVolumeSpecName: "kube-api-access-bs9s5") pod "1f0a815e-5cd1-408f-b849-bfcf30e019fa" (UID: "1f0a815e-5cd1-408f-b849-bfcf30e019fa"). InnerVolumeSpecName "kube-api-access-bs9s5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 11:23:01 crc kubenswrapper[4769]: I1125 11:23:01.228897 4769 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1f0a815e-5cd1-408f-b849-bfcf30e019fa-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1f0a815e-5cd1-408f-b849-bfcf30e019fa" (UID: "1f0a815e-5cd1-408f-b849-bfcf30e019fa"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:23:01 crc kubenswrapper[4769]: I1125 11:23:01.231860 4769 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bs9s5\" (UniqueName: \"kubernetes.io/projected/1f0a815e-5cd1-408f-b849-bfcf30e019fa-kube-api-access-bs9s5\") on node \"crc\" DevicePath \"\"" Nov 25 11:23:01 crc kubenswrapper[4769]: I1125 11:23:01.231900 4769 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f0a815e-5cd1-408f-b849-bfcf30e019fa-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 11:23:01 crc kubenswrapper[4769]: I1125 11:23:01.231913 4769 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f0a815e-5cd1-408f-b849-bfcf30e019fa-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 11:23:02 crc kubenswrapper[4769]: I1125 11:23:02.013777 4769 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wbm85" Nov 25 11:23:02 crc kubenswrapper[4769]: I1125 11:23:02.061643 4769 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wbm85"] Nov 25 11:23:02 crc kubenswrapper[4769]: I1125 11:23:02.073193 4769 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-wbm85"] Nov 25 11:23:02 crc kubenswrapper[4769]: I1125 11:23:02.252325 4769 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1f0a815e-5cd1-408f-b849-bfcf30e019fa" path="/var/lib/kubelet/pods/1f0a815e-5cd1-408f-b849-bfcf30e019fa/volumes" Nov 25 11:23:03 crc kubenswrapper[4769]: I1125 11:23:03.237679 4769 scope.go:117] "RemoveContainer" containerID="244f1becf8ebb9eafc0f61d6d968181865e3a7575ad5fa6d09cfbb219ff266b8" Nov 25 11:23:03 crc kubenswrapper[4769]: E1125 11:23:03.240726 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 11:23:18 crc kubenswrapper[4769]: I1125 11:23:18.236714 4769 scope.go:117] "RemoveContainer" containerID="244f1becf8ebb9eafc0f61d6d968181865e3a7575ad5fa6d09cfbb219ff266b8" Nov 25 11:23:18 crc kubenswrapper[4769]: E1125 11:23:18.237487 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 11:23:31 crc kubenswrapper[4769]: I1125 11:23:31.237428 4769 scope.go:117] "RemoveContainer" containerID="244f1becf8ebb9eafc0f61d6d968181865e3a7575ad5fa6d09cfbb219ff266b8" Nov 25 11:23:31 crc kubenswrapper[4769]: E1125 11:23:31.238119 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 11:23:46 crc kubenswrapper[4769]: I1125 11:23:46.237866 4769 scope.go:117] "RemoveContainer" containerID="244f1becf8ebb9eafc0f61d6d968181865e3a7575ad5fa6d09cfbb219ff266b8" Nov 25 11:23:46 crc kubenswrapper[4769]: E1125 11:23:46.238789 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 11:23:57 crc kubenswrapper[4769]: I1125 11:23:57.237814 4769 scope.go:117] "RemoveContainer" containerID="244f1becf8ebb9eafc0f61d6d968181865e3a7575ad5fa6d09cfbb219ff266b8" Nov 25 11:23:57 crc kubenswrapper[4769]: E1125 11:23:57.238730 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 11:24:11 crc kubenswrapper[4769]: I1125 11:24:11.237208 4769 scope.go:117] "RemoveContainer" containerID="244f1becf8ebb9eafc0f61d6d968181865e3a7575ad5fa6d09cfbb219ff266b8" Nov 25 11:24:11 crc kubenswrapper[4769]: E1125 11:24:11.237865 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 11:24:23 crc kubenswrapper[4769]: I1125 11:24:23.237652 4769 scope.go:117] "RemoveContainer" containerID="244f1becf8ebb9eafc0f61d6d968181865e3a7575ad5fa6d09cfbb219ff266b8" Nov 25 11:24:23 crc kubenswrapper[4769]: E1125 11:24:23.238548 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 11:24:38 crc kubenswrapper[4769]: I1125 11:24:38.238028 4769 scope.go:117] "RemoveContainer" containerID="244f1becf8ebb9eafc0f61d6d968181865e3a7575ad5fa6d09cfbb219ff266b8" Nov 25 11:24:38 crc kubenswrapper[4769]: E1125 11:24:38.238801 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256" Nov 25 11:24:52 crc kubenswrapper[4769]: I1125 11:24:52.251522 4769 
Nov 25 11:24:52 crc kubenswrapper[4769]: E1125 11:24:52.252533 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256"
Nov 25 11:25:06 crc kubenswrapper[4769]: I1125 11:25:06.238264 4769 scope.go:117] "RemoveContainer" containerID="244f1becf8ebb9eafc0f61d6d968181865e3a7575ad5fa6d09cfbb219ff266b8"
Nov 25 11:25:06 crc kubenswrapper[4769]: E1125 11:25:06.239658 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256"
Nov 25 11:25:17 crc kubenswrapper[4769]: I1125 11:25:17.237434 4769 scope.go:117] "RemoveContainer" containerID="244f1becf8ebb9eafc0f61d6d968181865e3a7575ad5fa6d09cfbb219ff266b8"
Nov 25 11:25:17 crc kubenswrapper[4769]: E1125 11:25:17.239902 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256"
Nov 25 11:25:30 crc kubenswrapper[4769]: I1125 11:25:30.237070 4769 scope.go:117] "RemoveContainer" containerID="244f1becf8ebb9eafc0f61d6d968181865e3a7575ad5fa6d09cfbb219ff266b8"
Nov 25 11:25:30 crc kubenswrapper[4769]: E1125 11:25:30.238422 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256"
Nov 25 11:25:41 crc kubenswrapper[4769]: I1125 11:25:41.238403 4769 scope.go:117] "RemoveContainer" containerID="244f1becf8ebb9eafc0f61d6d968181865e3a7575ad5fa6d09cfbb219ff266b8"
Nov 25 11:25:41 crc kubenswrapper[4769]: E1125 11:25:41.239264 4769 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-98mzt_openshift-machine-config-operator(d58c71b5-5dc4-45c1-9b58-9740a35d2256)\"" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" podUID="d58c71b5-5dc4-45c1-9b58-9740a35d2256"
Nov 25 11:25:53 crc kubenswrapper[4769]: I1125 11:25:53.237614 4769 scope.go:117] "RemoveContainer" containerID="244f1becf8ebb9eafc0f61d6d968181865e3a7575ad5fa6d09cfbb219ff266b8"
Nov 25 11:25:54 crc kubenswrapper[4769]: I1125 11:25:54.058103 4769 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-98mzt" event={"ID":"d58c71b5-5dc4-45c1-9b58-9740a35d2256","Type":"ContainerStarted","Data":"fce95231d9cabd25275e231fd9ce9b1be600f80ce4cfdc46c94eb1fb59d1f371"}
Nov 25 11:26:53 crc kubenswrapper[4769]: I1125 11:26:53.096226 4769 scope.go:117] "RemoveContainer" containerID="5f004a7dec50e74bef080a4e11cb4a44718e471d14a69998e7daceb238a6c7ee"
Nov 25 11:26:53 crc kubenswrapper[4769]: I1125 11:26:53.134642 4769 scope.go:117] "RemoveContainer" containerID="dff5f0967a13e4fce82f4f18d7285ff11d274ff12a5f63bbb2dbd9fba3e5142d"
Nov 25 11:26:53 crc kubenswrapper[4769]: I1125 11:26:53.193867 4769 scope.go:117] "RemoveContainer" containerID="e352bc6b25b3c3ef7a117326aebe01e420c83fe2173718816e087282291ca731"